ext | sha | content
---|---|---|
py | b40df4f4aa59fa5821f2aab5c87123b73d77fa4f | import numpy as np
import torch
import torch.nn as nn
eps = 1e-6
def calc_iou(a, b):
area = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
iw = torch.min(torch.unsqueeze(a[:, 2], dim=1), b[:, 2]) - torch.max(
torch.unsqueeze(a[:, 0], 1), b[:, 0]
)
ih = torch.min(torch.unsqueeze(a[:, 3], dim=1), b[:, 3]) - torch.max(
torch.unsqueeze(a[:, 1], 1), b[:, 1]
)
iw = torch.clamp(iw, min=0)
ih = torch.clamp(ih, min=0)
ua = (
torch.unsqueeze((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), dim=1)
+ area
- iw * ih
)
ua = torch.clamp(ua, min=1e-6)
intersection = iw * ih
IoU = intersection / (ua + eps)
return IoU
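# Added usage sketch (not part of the original file): calc_iou takes an (N, 4) and an
# (M, 4) tensor of [x1, y1, x2, y2] boxes and returns an (N, M) IoU matrix, e.g.
#   anchors_demo = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
#   gt_demo = torch.tensor([[0., 0., 10., 10.]])
#   calc_iou(anchors_demo, gt_demo)  # approx [[1.00], [0.14]]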
class FocalLoss(nn.Module):
# def __init__(self):
def forward(self, classifications, regressions, anchors, annotations):
alpha = 0.25
gamma = 2.0
batch_size = classifications.shape[0]
classification_losses = []
regression_losses = []
anchor = anchors[0, :, :]
anchor_widths = anchor[:, 2] - anchor[:, 0]
anchor_heights = anchor[:, 3] - anchor[:, 1]
anchor_ctr_x = anchor[:, 0] + 0.5 * anchor_widths
anchor_ctr_y = anchor[:, 1] + 0.5 * anchor_heights
for j in range(batch_size):
classification = classifications[j, :, :]
regression = regressions[j, :, :]
bbox_annotation = annotations[j, :, :]
bbox_annotation = bbox_annotation[bbox_annotation[:, 4] != -1]
classification = torch.clamp(classification, 1e-4, 1.0 - 1e-4)
if bbox_annotation.shape[0] == 0:
# print("here")
if torch.cuda.is_available():
alpha_factor = torch.ones(classification.shape).cuda() * alpha
alpha_factor = 1.0 - alpha_factor
focal_weight = classification
focal_weight = alpha_factor * torch.pow(focal_weight, gamma)
bce = -(torch.log(1.0 - classification))
# cls_loss = focal_weight * torch.pow(bce, gamma)
cls_loss = focal_weight * bce
classification_losses.append(cls_loss.sum())
regression_losses.append(torch.tensor(0).float().cuda())
else:
alpha_factor = torch.ones(classification.shape) * alpha
alpha_factor = 1.0 - alpha_factor
focal_weight = classification
focal_weight = alpha_factor * torch.pow(focal_weight, gamma)
bce = -(torch.log(1.0 - classification))
# cls_loss = focal_weight * torch.pow(bce, gamma)
cls_loss = focal_weight * bce
classification_losses.append(cls_loss.sum())
regression_losses.append(torch.tensor(0).float())
continue
IoU = calc_iou(
anchors[0, :, :], bbox_annotation[:, :4]
) # num_anchors x num_annotations
IoU_max, IoU_argmax = torch.max(IoU, dim=1) # num_anchors x 1
# import pdb
# pdb.set_trace()
# compute the loss for classification
targets = torch.ones(classification.shape) * -1
if torch.cuda.is_available():
targets = targets.cuda()
targets[torch.lt(IoU_max, 0.4), :] = 0
positive_indices = torch.ge(IoU_max, 0.5)
num_positive_anchors = positive_indices.sum()
assigned_annotations = bbox_annotation[IoU_argmax, :]
targets[positive_indices, :] = 0
targets[
positive_indices, assigned_annotations[positive_indices, 4].long()
] = 1
if torch.cuda.is_available():
alpha_factor = torch.ones(targets.shape).cuda() * alpha
else:
alpha_factor = torch.ones(targets.shape) * alpha
alpha_factor = torch.where(
torch.eq(targets, 1.0), alpha_factor, 1.0 - alpha_factor
)
focal_weight = torch.where(
torch.eq(targets, 1.0), 1.0 - classification, classification
)
focal_weight = alpha_factor * torch.pow(focal_weight, gamma)
bce = -(
targets * torch.log(classification)
+ (1.0 - targets) * torch.log(1.0 - classification)
)
# cls_loss = focal_weight * torch.pow(bce, gamma)
cls_loss = focal_weight * bce
if torch.cuda.is_available():
cls_loss = torch.where(
torch.ne(targets, -1.0),
cls_loss,
torch.zeros(cls_loss.shape).cuda(),
)
else:
cls_loss = torch.where(
torch.ne(targets, -1.0), cls_loss, torch.zeros(cls_loss.shape)
)
classification_losses.append(
cls_loss.sum() / torch.clamp(num_positive_anchors.float(), min=1.0)
)
# compute the loss for regression
if positive_indices.sum() > 0:
assigned_annotations = assigned_annotations[positive_indices, :]
anchor_widths_pi = anchor_widths[positive_indices]
anchor_heights_pi = anchor_heights[positive_indices]
anchor_ctr_x_pi = anchor_ctr_x[positive_indices]
anchor_ctr_y_pi = anchor_ctr_y[positive_indices]
gt_widths = assigned_annotations[:, 2] - assigned_annotations[:, 0]
gt_heights = assigned_annotations[:, 3] - assigned_annotations[:, 1]
gt_ctr_x = assigned_annotations[:, 0] + 0.5 * gt_widths
gt_ctr_y = assigned_annotations[:, 1] + 0.5 * gt_heights
# clip widths to 1
gt_widths = torch.clamp(gt_widths, min=1)
gt_heights = torch.clamp(gt_heights, min=1)
targets_dx = (gt_ctr_x - anchor_ctr_x_pi) / anchor_widths_pi
targets_dy = (gt_ctr_y - anchor_ctr_y_pi) / anchor_heights_pi
targets_dw = torch.log(gt_widths / anchor_widths_pi)
targets_dh = torch.log(gt_heights / anchor_heights_pi)
targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh))
targets = targets.t()
if torch.cuda.is_available():
targets = targets / torch.Tensor([[0.1, 0.1, 0.2, 0.2]]).cuda()
else:
targets = targets / torch.Tensor([[0.1, 0.1, 0.2, 0.2]])
negative_indices = 1 + (~positive_indices)
regression_diff = torch.abs(targets - regression[positive_indices, :])
regression_loss = torch.where(
torch.le(regression_diff, 1.0 / 9.0),
0.5 * 9.0 * torch.pow(regression_diff, 2),
regression_diff - 0.5 / 9.0,
)
regression_losses.append(regression_loss.mean())
else:
if torch.cuda.is_available():
regression_losses.append(torch.tensor(0).float().cuda())
else:
regression_losses.append(torch.tensor(0).float())
return (
torch.stack(classification_losses).mean(dim=0, keepdim=True),
torch.stack(regression_losses).mean(dim=0, keepdim=True),
)
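# Added note (a hedged sketch of the expected shapes, not part of the original file):
# FocalLoss.forward expects classifications (B, A, num_classes), regressions (B, A, 4),
# anchors (1, A, 4) in [x1, y1, x2, y2], and annotations (B, M, 5) rows of
# [x1, y1, x2, y2, class_id] padded with class_id == -1; it returns the mean
# classification and regression losses as two single-element tensors.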
|
py | b40df5551ca517662aaea6772c2f53a345a71794 | # -*- coding: utf-8 -*-
#
# Valuehorizon Benchmarks documentation build configuration file, created by
# sphinx-quickstart on Tue May 26 19:41:32 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../..'))
sys.path.insert(0, os.path.abspath('../../benchmarks'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'benchmarks.settings.sphinx_documentation'
from django.conf import settings
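# NOTE (added, hedged): on Django 1.7+ projects, autodoc of Django models typically
# also needs the app registry populated before modules are imported. Uncomment if
# the build fails with AppRegistryNotReady:
#import django
#django.setup()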
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Valuehorizon Benchmarks'
copyright = u'2015, Quincy Alexander'
author = u'Quincy Alexander'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ValuehorizonBenchmarksdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ValuehorizonBenchmarks.tex', u'Valuehorizon Benchmarks Documentation',
u'Quincy Alexander', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'valuehorizonBenchmarks', u'Valuehorizon Benchmarks Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ValuehorizonBenchmarks', u'Valuehorizon Benchmarks Documentation',
author, 'ValuehorizonBenchmarks', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
|
py | b40df5910f8c4dfb89e605ea98dcb8e6da37068a | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This material is part of "The Fuzzing Book".
# Web site: https://www.fuzzingbook.org/html/Intro_Testing.html
# Last change: 2019-03-13 21:14:41+01:00
#
#!/
# Copyright (c) 2018-2019 Saarland University, CISPA, authors, and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# # Introduction to Software Testing
if __name__ == "__main__":
print('# Introduction to Software Testing')
# ## Simple Testing
if __name__ == "__main__":
print('\n## Simple Testing')
def my_sqrt(x):
"""Computes the square root of x, using the Newton-Raphson method"""
approx = None
guess = x / 2
while approx != guess:
approx = guess
guess = (approx + x / approx) / 2
return approx
# ### Understanding Python Programs
if __name__ == "__main__":
print('\n### Understanding Python Programs')
# ### Running a Function
if __name__ == "__main__":
print('\n### Running a Function')
if __name__ == "__main__":
my_sqrt(4)
if __name__ == "__main__":
my_sqrt(2)
# ### Interacting with Notebooks
if __name__ == "__main__":
print('\n### Interacting with Notebooks')
# ### Debugging a Function
if __name__ == "__main__":
print('\n### Debugging a Function')
def my_sqrt_with_log(x):
"""Computes the square root of x, using the Newton–Raphson method"""
approx = None
guess = x / 2
while approx != guess:
print("approx =", approx) # <-- New
approx = guess
guess = (approx + x / approx) / 2
return approx
if __name__ == "__main__":
my_sqrt_with_log(9)
# ### Checking a Function
if __name__ == "__main__":
print('\n### Checking a Function')
if __name__ == "__main__":
my_sqrt(2) * my_sqrt(2)
# ## Automating Test Execution
if __name__ == "__main__":
print('\n## Automating Test Execution')
if __name__ == "__main__":
result = my_sqrt(4)
expected_result = 2.0
if result == expected_result:
print("Test passed")
else:
print("Test failed")
if __name__ == "__main__":
assert my_sqrt(4) == 2
EPSILON = 1e-8
if __name__ == "__main__":
assert abs(my_sqrt(4) - 2) < EPSILON
def assertEquals(x, y, epsilon=1e-8):
assert abs(x - y) < epsilon
if __name__ == "__main__":
assertEquals(my_sqrt(4), 2)
assertEquals(my_sqrt(9), 3)
assertEquals(my_sqrt(100), 10)
# ## Generating Tests
if __name__ == "__main__":
print('\n## Generating Tests')
if __name__ == "__main__":
assertEquals(my_sqrt(2) * my_sqrt(2), 2)
assertEquals(my_sqrt(3) * my_sqrt(3), 3)
assertEquals(my_sqrt(42.11) * my_sqrt(42.11), 42.11)
if __name__ == "__main__":
for n in range(1, 1000):
assertEquals(my_sqrt(n) * my_sqrt(n), n)
if __name__ == "__main__":
# We use the same fixed seed as the notebook to ensure consistency
import random
random.seed(2001)
if __package__ is None or __package__ == "":
from Timer import Timer
else:
from .Timer import Timer
if __name__ == "__main__":
with Timer() as t:
for n in range(1, 10000):
assertEquals(my_sqrt(n) * my_sqrt(n), n)
print(t.elapsed_time())
import random
if __name__ == "__main__":
with Timer() as t:
for i in range(10000):
x = 1 + random.random() * 1000000
assertEquals(my_sqrt(x) * my_sqrt(x), x)
print(t.elapsed_time())
# ## Run-Time Verification
if __name__ == "__main__":
print('\n## Run-Time Verification')
def my_sqrt_checked(x):
root = my_sqrt(x)
assertEquals(root * root, x)
return root
if __name__ == "__main__":
my_sqrt_checked(2.0)
# ## System Input vs Function Input
if __name__ == "__main__":
print('\n## System Input vs Function Input')
def sqrt_program(arg):
x = int(arg)
print('The root of', x, 'is', my_sqrt(x))
if __name__ == "__main__":
sqrt_program("4")
if __package__ is None or __package__ == "":
from ExpectError import ExpectTimeout
else:
from .ExpectError import ExpectTimeout
if __name__ == "__main__":
with ExpectTimeout(1):
sqrt_program("-1")
def sqrt_program(arg):
x = int(arg)
if x < 0:
print("Illegal Input")
else:
print('The root of', x, 'is', my_sqrt(x))
if __name__ == "__main__":
sqrt_program("-1")
if __package__ is None or __package__ == "":
from ExpectError import ExpectError
else:
from .ExpectError import ExpectError
if __name__ == "__main__":
with ExpectError():
sqrt_program("xyzzy")
def sqrt_program(arg):
try:
x = float(arg)
except ValueError:
print("Illegal Input")
else:
if x < 0:
print("Illegal Number")
else:
print('The root of', x, 'is', my_sqrt(x))
if __name__ == "__main__":
sqrt_program("4")
if __name__ == "__main__":
sqrt_program("-1")
if __name__ == "__main__":
sqrt_program("xyzzy")
# ## The Limits of Testing
if __name__ == "__main__":
print('\n## The Limits of Testing')
if __name__ == "__main__":
with ExpectError():
root = my_sqrt(0)
def my_sqrt_fixed(x):
assert 0 <= x
if x == 0:
return 0
return my_sqrt(x)
if __name__ == "__main__":
assert my_sqrt_fixed(0) == 0
if __name__ == "__main__":
with ExpectError():
root = my_sqrt_fixed(-1)
# ## Lessons Learned
if __name__ == "__main__":
print('\n## Lessons Learned')
# ## Next Steps
if __name__ == "__main__":
print('\n## Next Steps')
# ## Background
if __name__ == "__main__":
print('\n## Background')
# ## Exercises
if __name__ == "__main__":
print('\n## Exercises')
# ### Exercise 1: Testing Shellsort
if __name__ == "__main__":
print('\n### Exercise 1: Testing Shellsort')
def shellsort(elems):
sorted_elems = elems.copy()
gaps = [701, 301, 132, 57, 23, 10, 4, 1]
for gap in gaps:
for i in range(gap, len(sorted_elems)):
temp = sorted_elems[i]
j = i
while j >= gap and sorted_elems[j - gap] > temp:
sorted_elems[j] = sorted_elems[j - gap]
j -= gap
sorted_elems[j] = temp
return sorted_elems
if __name__ == "__main__":
shellsort([3, 2, 1])
if __name__ == "__main__":
a = [5, 6, 99, 7]
print("First element:", a[0], "length:", len(a))
if __name__ == "__main__":
for x in range(1, 5):
print(x)
# #### Part 1: Manual Test Cases
if __name__ == "__main__":
print('\n#### Part 1: Manual Test Cases')
if __name__ == "__main__":
# Standard lists
assert shellsort([3, 2, 1]) == [1, 2, 3]
assert shellsort([1, 2, 3, 4]) == [1, 2, 3, 4]
assert shellsort([6, 5]) == [5, 6]
if __name__ == "__main__":
# Check for duplicates
assert shellsort([2, 2, 1]) == [1, 2, 2]
if __name__ == "__main__":
# Empty list
assert shellsort([]) == []
# #### Part 2: Random Inputs
if __name__ == "__main__":
print('\n#### Part 2: Random Inputs')
def is_sorted(elems):
return all(elems[i] <= elems[i + 1] for i in range(len(elems) - 1))
if __name__ == "__main__":
is_sorted([3, 5, 9])
def is_permutation(a, b):
return len(a) == len(b) and all(a.count(elem) == b.count(elem) for elem in a)
if __name__ == "__main__":
is_permutation([3, 2, 1], [1, 3, 2])
def random_list():
length = random.randint(1, 10)
elems = []
for i in range(length):
elems.append(random.randint(0, 100))
return elems
if __name__ == "__main__":
random_list()
if __name__ == "__main__":
elems = random_list()
print(elems)
if __name__ == "__main__":
sorted_elems = shellsort(elems)
print(sorted_elems)
if __name__ == "__main__":
assert is_sorted(sorted_elems) and is_permutation(sorted_elems, elems)
if __name__ == "__main__":
for i in range(1000):
elems = random_list()
sorted_elems = shellsort(elems)
assert is_sorted(sorted_elems) and is_permutation(sorted_elems, elems)
# ### Exercise 2: Quadratic Solver
if __name__ == "__main__":
print('\n### Exercise 2: Quadratic Solver')
def quadratic_solver(a, b, c):
q = b * b - 4 * a * c
solution_1 = (-b + my_sqrt_fixed(q)) / (2 * a)
solution_2 = (-b - my_sqrt_fixed(q)) / (2 * a)
return (solution_1, solution_2)
if __name__ == "__main__":
quadratic_solver(3, 4, 1)
# #### Part 1: Find bug-triggering inputs
if __name__ == "__main__":
print('\n#### Part 1: Find bug-triggering inputs')
if __name__ == "__main__":
with ExpectError():
print(quadratic_solver(3, 2, 1))
if __name__ == "__main__":
with ExpectError():
print(quadratic_solver(0, 0, 1))
# #### Part 2: Fix the problem
if __name__ == "__main__":
print('\n#### Part 2: Fix the problem')
def quadratic_solver_fixed(a, b, c):
if a == 0:
if b == 0:
if c == 0:
# Actually, any value of x
return (0, None)
else:
# No value of x can satisfy c = 0
return (None, None)
else:
return (-c / b, None)
q = b * b - 4 * a * c
if q < 0:
return (None, None)
if q == 0:
solution = -b / (2 * a)
return (solution, None)
solution_1 = (-b + my_sqrt_fixed(q)) / (2 * a)
solution_2 = (-b - my_sqrt_fixed(q)) / (2 * a)
return (solution_1, solution_2)
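if __name__ == "__main__":
    # Added sanity check (a sketch, not in the original notebook):
    # x^2 - 3x + 2 = 0 has the two real roots 2 and 1.
    assert quadratic_solver_fixed(1, -3, 2) == (2.0, 1.0)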
if __name__ == "__main__":
with ExpectError():
print(quadratic_solver_fixed(3, 2, 1))
if __name__ == "__main__":
with ExpectError():
print(quadratic_solver_fixed(0, 0, 1))
# #### Part 3: Odds and Ends
if __name__ == "__main__":
print('\n#### Part 3: Odds and Ends')
if __name__ == "__main__":
combinations = 2 ** 32 * 2 ** 32
combinations
if __name__ == "__main__":
tests_per_second = 1000000000
seconds_per_year = 60 * 60 * 24 * 365.25
tests_per_year = tests_per_second * seconds_per_year
combinations / tests_per_year
|
py | b40df5a1ab2a9e9711254b8ccaa04b618cc53790 | from django.apps import AppConfig
class PicsartConfig(AppConfig):
name = 'picsart'
|
py | b40df6cf6e4a4ee815d270c26ae96a07e4f90883 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Ansible deploy interface
"""
import json
import os
import shlex
from ironic_lib import metrics_utils
from ironic_lib import utils as irlib_utils
from oslo_concurrency import processutils
from oslo_log import log
from oslo_utils import strutils
from oslo_utils import units
import retrying
import six
import six.moves.urllib.parse as urlparse
import yaml
from ironic.common import dhcp_factory
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import images
from ironic.common import states
from ironic.common import utils
from ironic.conductor import steps as conductor_steps
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.conf import CONF
from ironic.drivers import base
from ironic.drivers.modules import agent_base_vendor as agent_base
from ironic.drivers.modules import deploy_utils
LOG = log.getLogger(__name__)
METRICS = metrics_utils.get_metrics_logger(__name__)
OPTIONAL_PROPERTIES = {
'ansible_username': _('Deploy ramdisk username for Ansible. '
'This user must have passwordless sudo '
'permissions. Optional.'),
'ansible_key_file': _('Full path to private SSH key file. '
'If not specified, default keys for user running '
'ironic-conductor process will be used. '
'Note that for keys with password, those '
'must be pre-loaded into ssh-agent. '
'Optional.'),
'ansible_playbooks_path': _('Path to folder holding playbooks to use '
'for this node. Optional. '
'Default is set in ironic config.'),
'ansible_deploy_playbook': _('Name of the Ansible playbook file inside '
'the "ansible_playbooks_path" folder which '
'is used for node deployment. Optional.'),
'ansible_shutdown_playbook': _('Name of the Ansible playbook file inside '
'the "ansible_playbooks_path" folder which '
'is used for node shutdown. Optional.'),
'ansible_clean_playbook': _('Name of the Ansible playbook file inside '
'the "ansible_playbooks_path" folder which '
'is used for node cleaning. Optional.'),
'ansible_clean_steps_config': _('Name of the file inside the '
'"ansible_playbooks_path" folder with '
'cleaning steps configuration. Optional.'),
'ansible_python_interpreter': _('Absolute path to the python interpreter '
'on the managed machines. Optional.'),
}
COMMON_PROPERTIES = OPTIONAL_PROPERTIES
class PlaybookNotFound(exception.IronicException):
_msg_fmt = _('Failed to set ansible playbook for action %(action)s')
def _get_playbooks_path(node):
return node.driver_info.get('ansible_playbooks_path',
CONF.ansible.playbooks_path)
def _parse_ansible_driver_info(node, action='deploy'):
user = node.driver_info.get('ansible_username',
CONF.ansible.default_username)
key = node.driver_info.get('ansible_key_file',
CONF.ansible.default_key_file)
playbook = node.driver_info.get('ansible_%s_playbook' % action,
getattr(CONF.ansible,
'default_%s_playbook' % action,
None))
if not playbook:
raise PlaybookNotFound(action=action)
return os.path.basename(playbook), user, key
def _get_python_interpreter(node):
return node.driver_info.get('ansible_python_interpreter',
CONF.ansible.default_python_interpreter)
def _get_configdrive_path(basename):
return os.path.join(CONF.tempdir, basename + '.cndrive')
def _get_node_ip(task):
callback_url = task.node.driver_internal_info.get('agent_url', '')
return urlparse.urlparse(callback_url).netloc.split(':')[0]
def _prepare_extra_vars(host_list, variables=None):
nodes_var = []
for node_uuid, ip, user, extra in host_list:
nodes_var.append(dict(name=node_uuid, ip=ip, user=user, extra=extra))
extra_vars = dict(nodes=nodes_var)
if variables:
extra_vars.update(variables)
return extra_vars
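# Added note (illustrative sketch, not part of the original module): the result is the
# per-node structure that _run_playbook later nests under the 'ironic' key and passes
# to ansible-playbook via -e, e.g.
#   _prepare_extra_vars([('uuid-1', '10.0.0.5', 'ansible', {})],
#                       variables={'image': {'url': 'http://example/img'}})
#   -> {'nodes': [{'name': 'uuid-1', 'ip': '10.0.0.5', 'user': 'ansible', 'extra': {}}],
#       'image': {'url': 'http://example/img'}}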
def _run_playbook(node, name, extra_vars, key, tags=None, notags=None):
"""Execute ansible-playbook."""
root = _get_playbooks_path(node)
playbook = os.path.join(root, name)
inventory = os.path.join(root, 'inventory')
ironic_vars = {'ironic': extra_vars}
python_interpreter = _get_python_interpreter(node)
if python_interpreter:
ironic_vars['ansible_python_interpreter'] = python_interpreter
args = [CONF.ansible.ansible_playbook_script, playbook,
'-i', inventory,
'-e', json.dumps(ironic_vars),
]
if CONF.ansible.config_file_path:
env = ['env', 'ANSIBLE_CONFIG=%s' % CONF.ansible.config_file_path]
args = env + args
if tags:
args.append('--tags=%s' % ','.join(tags))
if notags:
args.append('--skip-tags=%s' % ','.join(notags))
if key:
args.append('--private-key=%s' % key)
verbosity = CONF.ansible.verbosity
if verbosity is None and CONF.debug:
verbosity = 4
if verbosity:
args.append('-' + 'v' * verbosity)
if CONF.ansible.ansible_extra_args:
args.extend(shlex.split(CONF.ansible.ansible_extra_args))
try:
out, err = utils.execute(*args)
return out, err
except processutils.ProcessExecutionError as e:
raise exception.InstanceDeployFailure(reason=e)
def _calculate_memory_req(task):
image_source = task.node.instance_info['image_source']
image_size = images.download_size(task.context, image_source)
return image_size // units.Mi + CONF.ansible.extra_memory
def _parse_partitioning_info(node):
info = node.instance_info
i_info = {'label': deploy_utils.get_disk_label(node) or 'msdos'}
is_gpt = i_info['label'] == 'gpt'
unit = 'MiB'
partitions = {}
def add_partition(name, start, end):
partitions[name] = {'number': len(partitions) + 1,
'part_start': '%i%s' % (start, unit),
'part_end': '%i%s' % (end, unit)}
if is_gpt:
partitions[name]['name'] = name
end = 1
if is_gpt:
# prepend 1MiB bios_grub partition for GPT so that grub(2) installs
start, end = end, end + 1
add_partition('bios', start, end)
partitions['bios']['flags'] = ['bios_grub']
ephemeral_mb = info['ephemeral_mb']
if ephemeral_mb:
start, end = end, end + ephemeral_mb
add_partition('ephemeral', start, end)
i_info['ephemeral_format'] = info['ephemeral_format']
i_info['preserve_ephemeral'] = (
'yes' if info['preserve_ephemeral'] else 'no')
swap_mb = info['swap_mb']
if swap_mb:
start, end = end, end + swap_mb
add_partition('swap', start, end)
configdrive = info.get('configdrive')
if configdrive:
# pre-create 64MiB partition for configdrive
start, end = end, end + 64
add_partition('configdrive', start, end)
# NOTE(pas-ha) make the root partition last so that
# e.g. cloud-init can grow it on first start
start, end = end, end + info['root_mb']
add_partition('root', start, end)
if not is_gpt:
partitions['root']['flags'] = ['boot']
i_info['partitions'] = partitions
return {'partition_info': i_info}
def _parse_root_device_hints(node):
"""Convert string with hints to dict. """
root_device = node.properties.get('root_device')
if not root_device:
return {}
try:
parsed_hints = irlib_utils.parse_root_device_hints(root_device)
except ValueError as e:
raise exception.InvalidParameterValue(
_('Failed to validate the root device hints for node %(node)s. '
'Error: %(error)s') % {'node': node.uuid, 'error': e})
root_device_hints = {}
advanced = {}
for hint, value in parsed_hints.items():
if isinstance(value, six.string_types):
if value.startswith('== '):
root_device_hints[hint] = int(value[3:])
elif value.startswith('s== '):
root_device_hints[hint] = urlparse.unquote(value[4:])
else:
advanced[hint] = value
else:
root_device_hints[hint] = value
if advanced:
raise exception.InvalidParameterValue(
_('Ansible-deploy does not support advanced root device hints '
'based on oslo.utils operators. '
'Present advanced hints for node %(node)s are %(hints)s.') % {
'node': node.uuid, 'hints': advanced})
return root_device_hints
def _add_ssl_image_options(image):
image['validate_certs'] = ('no' if CONF.ansible.image_store_insecure
else 'yes')
if CONF.ansible.image_store_cafile:
image['cafile'] = CONF.ansible.image_store_cafile
if CONF.ansible.image_store_certfile and CONF.ansible.image_store_keyfile:
image['client_cert'] = CONF.ansible.image_store_certfile
image['client_key'] = CONF.ansible.image_store_keyfile
def _prepare_variables(task):
node = task.node
i_info = node.instance_info
image = {}
for i_key, i_value in i_info.items():
if i_key.startswith('image_'):
image[i_key[6:]] = i_value
checksum = image.get('checksum')
if checksum:
# NOTE(pas-ha) checksum can be in <algo>:<checksum> format
# as supported by various Ansible modules, mostly good for
# standalone Ironic case when instance_info is populated manually.
# With no <algo> we take that instance_info is populated from Glance,
# where API reports checksum as MD5 always.
if ':' not in checksum:
image['checksum'] = 'md5:%s' % checksum
_add_ssl_image_options(image)
variables = {'image': image}
configdrive = i_info.get('configdrive')
if configdrive:
if urlparse.urlparse(configdrive).scheme in ('http', 'https'):
cfgdrv_type = 'url'
cfgdrv_location = configdrive
else:
cfgdrv_location = _get_configdrive_path(node.uuid)
with open(cfgdrv_location, 'w') as f:
f.write(configdrive)
cfgdrv_type = 'file'
variables['configdrive'] = {'type': cfgdrv_type,
'location': cfgdrv_location}
root_device_hints = _parse_root_device_hints(node)
if root_device_hints:
variables['root_device_hints'] = root_device_hints
return variables
def _validate_clean_steps(steps, node_uuid):
missing = []
for step in steps:
name = step.get('name')
if not name:
missing.append({'name': 'undefined', 'field': 'name'})
continue
if 'interface' not in step:
missing.append({'name': name, 'field': 'interface'})
args = step.get('args', {})
for arg_name, arg in args.items():
if arg.get('required', False) and 'value' not in arg:
missing.append({'name': name,
'field': '%s.value' % arg_name})
if missing:
err_string = ', '.join(
'name %(name)s, field %(field)s' % i for i in missing)
msg = _("Malformed clean_steps file: %s") % err_string
LOG.error(msg)
raise exception.NodeCleaningFailure(node=node_uuid,
reason=msg)
if len(set(s['name'] for s in steps)) != len(steps):
msg = _("Cleaning steps do not have unique names.")
LOG.error(msg)
raise exception.NodeCleaningFailure(node=node_uuid,
reason=msg)
def _get_clean_steps(node, interface=None, override_priorities=None):
"""Get cleaning steps."""
clean_steps_file = node.driver_info.get(
'ansible_clean_steps_config', CONF.ansible.default_clean_steps_config)
path = os.path.join(node.driver_info.get('ansible_playbooks_path',
CONF.ansible.playbooks_path),
os.path.basename(clean_steps_file))
try:
with open(path) as f:
internal_steps = yaml.safe_load(f)
except Exception as e:
msg = _('Failed to load clean steps from file '
'%(file)s: %(exc)s') % {'file': path, 'exc': e}
raise exception.NodeCleaningFailure(node=node.uuid, reason=msg)
_validate_clean_steps(internal_steps, node.uuid)
steps = []
override = override_priorities or {}
for params in internal_steps:
name = params['name']
clean_if = params['interface']
if interface is not None and interface != clean_if:
continue
new_priority = override.get(name)
priority = (new_priority if new_priority is not None else
params.get('priority', 0))
args = {}
argsinfo = params.get('args', {})
for arg, arg_info in argsinfo.items():
args[arg] = arg_info.pop('value', None)
step = {
'interface': clean_if,
'step': name,
'priority': priority,
'abortable': False,
'argsinfo': argsinfo,
'args': args
}
steps.append(step)
return steps
class AnsibleDeploy(agent_base.HeartbeatMixin, base.DeployInterface):
"""Interface for deploy-related actions."""
def __init__(self):
super(AnsibleDeploy, self).__init__()
# NOTE(pas-ha) overriding agent creation as we won't be
# communicating with it, only processing heartbeats
self._client = None
def get_properties(self):
"""Return the properties of the interface."""
props = COMMON_PROPERTIES.copy()
# NOTE(pas-ha) this is to get the deploy_forces_oob_reboot property
props.update(agent_base.VENDOR_PROPERTIES)
return props
@METRICS.timer('AnsibleDeploy.validate')
def validate(self, task):
"""Validate the driver-specific Node deployment info."""
task.driver.boot.validate(task)
node = task.node
iwdi = node.driver_internal_info.get('is_whole_disk_image')
if not iwdi and deploy_utils.get_boot_option(node) == "netboot":
raise exception.InvalidParameterValue(_(
"Node %(node)s is configured to use the ansible deploy "
"interface, which does not support netboot.") %
{'node': node.uuid})
params = {}
image_source = node.instance_info.get('image_source')
params['instance_info.image_source'] = image_source
error_msg = _('Node %s failed to validate deploy image info. Some '
'parameters were missing') % node.uuid
deploy_utils.check_for_missing_params(params, error_msg)
# validate root device hints, proper exceptions are raised from there
_parse_root_device_hints(node)
# TODO(pas-ha) validate that all playbooks and ssh key (if set)
# are pointing to actual files
def _ansible_deploy(self, task, node_address):
"""Internal function for deployment to a node."""
node = task.node
LOG.debug('IP of node %(node)s is %(ip)s',
{'node': node.uuid, 'ip': node_address})
variables = _prepare_variables(task)
if not node.driver_internal_info.get('is_whole_disk_image'):
variables.update(_parse_partitioning_info(node))
if node.target_raid_config:
variables.update({'raid_config': node.target_raid_config})
playbook, user, key = _parse_ansible_driver_info(node)
node_list = [(node.uuid, node_address, user, node.extra)]
extra_vars = _prepare_extra_vars(node_list, variables=variables)
LOG.debug('Starting deploy on node %s', node.uuid)
# any caller should manage exceptions raised from here
_run_playbook(node, playbook, extra_vars, key)
@METRICS.timer('AnsibleDeploy.deploy')
@base.deploy_step(priority=100)
@task_manager.require_exclusive_lock
def deploy(self, task):
"""Perform a deployment to a node."""
self._required_image_info(task)
manager_utils.node_power_action(task, states.REBOOT)
return states.DEPLOYWAIT
@staticmethod
def _required_image_info(task):
"""Gather and save needed image info while the context is good.
Gather image info that will be needed later, during the
continue_deploy execution, where the context won't be the same
anymore, since coming from the server's heartbeat.
"""
node = task.node
i_info = node.instance_info
i_info['image_mem_req'] = _calculate_memory_req(task)
node.instance_info = i_info
node.save()
@METRICS.timer('AnsibleDeploy.tear_down')
@task_manager.require_exclusive_lock
def tear_down(self, task):
"""Tear down a previous deployment on the task's node."""
manager_utils.node_power_action(task, states.POWER_OFF)
power_state_to_restore = manager_utils.power_on_node_if_needed(task)
task.driver.network.unconfigure_tenant_networks(task)
manager_utils.restore_power_state_if_needed(
task, power_state_to_restore)
return states.DELETED
@METRICS.timer('AnsibleDeploy.prepare')
def prepare(self, task):
"""Prepare the deployment environment for this node."""
node = task.node
# TODO(pas-ha) investigate takeover scenario
if node.provision_state == states.DEPLOYING:
# adding network-driver dependent provisioning ports
manager_utils.node_power_action(task, states.POWER_OFF)
power_state_to_restore = (
manager_utils.power_on_node_if_needed(task))
task.driver.network.add_provisioning_network(task)
manager_utils.restore_power_state_if_needed(
task, power_state_to_restore)
if node.provision_state not in [states.ACTIVE, states.ADOPTING]:
node.instance_info = deploy_utils.build_instance_info_for_deploy(
task)
node.save()
boot_opt = deploy_utils.build_agent_options(node)
task.driver.boot.prepare_ramdisk(task, boot_opt)
@METRICS.timer('AnsibleDeploy.clean_up')
def clean_up(self, task):
"""Clean up the deployment environment for this node."""
task.driver.boot.clean_up_ramdisk(task)
provider = dhcp_factory.DHCPFactory()
provider.clean_dhcp(task)
irlib_utils.unlink_without_raise(
_get_configdrive_path(task.node.uuid))
def take_over(self, task):
LOG.error("Ansible deploy does not support take over. "
"You must redeploy the node %s explicitly.",
task.node.uuid)
def get_clean_steps(self, task):
"""Get the list of clean steps from the file.
:param task: a TaskManager object containing the node
:returns: A list of clean step dictionaries
"""
new_priorities = {
'erase_devices': CONF.deploy.erase_devices_priority,
'erase_devices_metadata':
CONF.deploy.erase_devices_metadata_priority
}
return _get_clean_steps(task.node, interface='deploy',
override_priorities=new_priorities)
@METRICS.timer('AnsibleDeploy.execute_clean_step')
def execute_clean_step(self, task, step):
"""Execute a clean step.
:param task: a TaskManager object containing the node
:param step: a clean step dictionary to execute
:returns: None
"""
node = task.node
playbook, user, key = _parse_ansible_driver_info(
task.node, action='clean')
stepname = step['step']
node_address = _get_node_ip(task)
node_list = [(node.uuid, node_address, user, node.extra)]
if node.target_raid_config:
variables = {'raid_config': node.target_raid_config}
extra_vars = _prepare_extra_vars(node_list, variables=variables)
else:
extra_vars = _prepare_extra_vars(node_list)
LOG.debug('Starting cleaning step %(step)s on node %(node)s',
{'node': node.uuid, 'step': stepname})
step_tags = step['args'].get('tags', [])
LOG.debug("Detected tags from cleaning step: %(tags)s",
{'tags': step_tags})
_run_playbook(node, playbook, extra_vars, key, tags=step_tags)
LOG.info('Ansible completed cleaning step %(step)s '
'on node %(node)s.',
{'node': node.uuid, 'step': stepname})
@METRICS.timer('AnsibleDeploy.prepare_cleaning')
def prepare_cleaning(self, task):
"""Boot into the ramdisk to prepare for cleaning.
:param task: a TaskManager object containing the node
:raises NodeCleaningFailure: if the previous cleaning ports cannot
be removed or if new cleaning ports cannot be created
:returns: None or states.CLEANWAIT for async prepare.
"""
node = task.node
conductor_steps.set_node_cleaning_steps(task)
if not node.driver_internal_info['clean_steps']:
# no clean steps configured, nothing to do.
return
power_state_to_restore = manager_utils.power_on_node_if_needed(task)
task.driver.network.add_cleaning_network(task)
manager_utils.restore_power_state_if_needed(
task, power_state_to_restore)
boot_opt = deploy_utils.build_agent_options(node)
task.driver.boot.prepare_ramdisk(task, boot_opt)
manager_utils.node_power_action(task, states.REBOOT)
return states.CLEANWAIT
@METRICS.timer('AnsibleDeploy.tear_down_cleaning')
def tear_down_cleaning(self, task):
"""Clean up the PXE and DHCP files after cleaning.
:param task: a TaskManager object containing the node
:raises NodeCleaningFailure: if the cleaning ports cannot be
removed
"""
manager_utils.node_power_action(task, states.POWER_OFF)
task.driver.boot.clean_up_ramdisk(task)
power_state_to_restore = manager_utils.power_on_node_if_needed(task)
task.driver.network.remove_cleaning_network(task)
manager_utils.restore_power_state_if_needed(
task, power_state_to_restore)
@METRICS.timer('AnsibleDeploy.continue_deploy')
def continue_deploy(self, task):
# NOTE(pas-ha) the lock should be already upgraded in heartbeat,
# just setting its purpose for better logging
task.upgrade_lock(purpose='deploy')
task.process_event('resume')
# NOTE(pas-ha) this method is called from heartbeat processing only,
# so we are sure we need this particular method, not the general one
node_address = _get_node_ip(task)
self._ansible_deploy(task, node_address)
self.reboot_to_instance(task)
@METRICS.timer('AnsibleDeploy.reboot_to_instance')
def reboot_to_instance(self, task):
node = task.node
LOG.info('Ansible complete deploy on node %s', node.uuid)
LOG.debug('Rebooting node %s to instance', node.uuid)
manager_utils.node_set_boot_device(task, 'disk', persistent=True)
self.reboot_and_finish_deploy(task)
task.driver.boot.clean_up_ramdisk(task)
if not node.deploy_step:
# TODO(rloo): delete this 'if' part after deprecation period, when
# we expect all (out-of-tree) drivers to support deploy steps.
# After which we will always notify_conductor_resume_deploy().
task.process_event('done')
LOG.info('Deployment to node %s done', task.node.uuid)
else:
manager_utils.notify_conductor_resume_deploy(task)
@METRICS.timer('AnsibleDeploy.reboot_and_finish_deploy')
def reboot_and_finish_deploy(self, task):
wait = CONF.ansible.post_deploy_get_power_state_retry_interval * 1000
attempts = CONF.ansible.post_deploy_get_power_state_retries + 1
@retrying.retry(
stop_max_attempt_number=attempts,
retry_on_result=lambda state: state != states.POWER_OFF,
wait_fixed=wait
)
def _wait_until_powered_off(task):
return task.driver.power.get_power_state(task)
node = task.node
oob_power_off = strutils.bool_from_string(
node.driver_info.get('deploy_forces_oob_reboot', False))
try:
if not oob_power_off:
try:
node_address = _get_node_ip(task)
playbook, user, key = _parse_ansible_driver_info(
node, action='shutdown')
node_list = [(node.uuid, node_address, user, node.extra)]
extra_vars = _prepare_extra_vars(node_list)
_run_playbook(node, playbook, extra_vars, key)
_wait_until_powered_off(task)
except Exception as e:
LOG.warning('Failed to soft power off node %(node_uuid)s '
'in at least %(timeout)d seconds. '
'Error: %(error)s',
{'node_uuid': node.uuid,
'timeout': (wait * (attempts - 1)) / 1000,
'error': e})
# NOTE(pas-ha) flush is a part of deploy playbook
# so if it finished successfully we can safely
# power off the node out-of-band
manager_utils.node_power_action(task, states.POWER_OFF)
else:
manager_utils.node_power_action(task, states.POWER_OFF)
power_state_to_restore = (
manager_utils.power_on_node_if_needed(task))
task.driver.network.remove_provisioning_network(task)
task.driver.network.configure_tenant_networks(task)
manager_utils.restore_power_state_if_needed(
task, power_state_to_restore)
manager_utils.node_power_action(task, states.POWER_ON)
except Exception as e:
msg = (_('Error rebooting node %(node)s after deploy. '
'Error: %(error)s') %
{'node': node.uuid, 'error': e})
agent_base.log_and_raise_deployment_error(task, msg)
|
py | b40df6fb054b862a051569088bf1ee84e5f5639b | # Tests
from uvwxyz.uvwxyz import uvw, xyz
# TW Hya from Simbad
ra = 165.46627797
dec = -34.70473119
pmra = -66.19
pmdec = -13.9
dist = 53.7
rv = 13.4
def test_uvw():
u,v,w = uvw(ra, dec, dist, pmra, pmdec, rv)
assert round(u, 2) == -10.87
assert round(v, 2) == -18.35
assert round(w, 2) == -4.59
def test_xyz():
x, y, z = xyz(ra, dec, dist)
assert round(x, 2) == 7.46
assert round(y, 2) == -48.88
assert round(z, 2) == 20.94
|
py | b40df769944f060a34c53728b320f2a9e4d3f399 | import sys
import os
import os.path
from jinja2 import Template
try:
from configparser import ConfigParser
from io import StringIO
except ImportError:
from ConfigParser import ConfigParser
from StringIO import StringIO
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: <program> <deploy_cfg_template_file> <file_with_properties>")
print("Properties from <file_with_properties> will be applied to <deploy_cfg_template_file>")
print("template which will be overwritten with .orig copy saved in the same folder first.")
sys.exit(1)
file = open(sys.argv[1], 'r')
text = file.read()
t = Template(text)
config = ConfigParser()
if os.path.isfile(sys.argv[2]):
config.read(sys.argv[2])
elif "KBASE_ENDPOINT" in os.environ:
kbase_endpoint = os.environ.get("KBASE_ENDPOINT")
props = "[global]\n" + \
"kbase_endpoint = " + kbase_endpoint + "\n" + \
"job_service_url = " + kbase_endpoint + "/userandjobstate\n" + \
"workspace_url = " + kbase_endpoint + "/ws\n" + \
"shock_url = " + kbase_endpoint + "/shock-api\n" + \
"handle_url = " + kbase_endpoint + "/handle_service\n" + \
"srv_wiz_url = " + kbase_endpoint + "/service_wizard\n" + \
"njsw_url = " + kbase_endpoint + "/njs_wrapper\n"
if "AUTH_SERVICE_URL" in os.environ:
props += "auth_service_url = " + os.environ.get("AUTH_SERVICE_URL") + "\n"
props += "auth_service_url_allow_insecure = " + \
os.environ.get("AUTH_SERVICE_URL_ALLOW_INSECURE", "false") + "\n"
for key in os.environ:
if key.startswith('KBASE_SECURE_CONFIG_PARAM_'):
param_name = key[len('KBASE_SECURE_CONFIG_PARAM_'):]
props += param_name + " = " + os.environ.get(key) + "\n"
config.readfp(StringIO(props))
else:
raise ValueError('Neither ' + sys.argv[2] + ' file nor KBASE_ENDPOINT env-variable found')
props = dict(config.items("global"))
output = t.render(props)
with open(sys.argv[1] + ".orig", 'w') as f:
f.write(text)
with open(sys.argv[1], 'w') as f:
f.write(output)
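# Added example (a hedged sketch, not part of the original script): with a template line
# such as
#   kbase-endpoint = {{ kbase_endpoint }}
# and KBASE_ENDPOINT=https://kbase.us/services in the environment, the rendered file
# would contain
#   kbase-endpoint = https://kbase.us/services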
# Testing LGTM integration
|
py | b40dfa65fc4daa783ec5710e7198dc0aa6855003 | import socket
import asyncio
from .abc import AbstractResolver
try:
import aiodns
except ImportError:
aiodns = None
class DefaultResolver(AbstractResolver):
"""Use Executor for synchronous getaddrinfo() calls, which defaults to
concurrent.futures.ThreadPoolExecutor.
"""
def __init__(self, loop=None):
if loop is None:
loop = asyncio.get_event_loop()
self._loop = loop
@asyncio.coroutine
def resolve(self, host, port=0, family=socket.AF_INET):
infos = yield from self._loop.getaddrinfo(
host, port, type=socket.SOCK_STREAM, family=family)
hosts = []
for family, _, proto, _, address in infos:
hosts.append(
{'hostname': host,
'host': address[0], 'port': address[1],
'family': family, 'proto': proto,
'flags': socket.AI_NUMERICHOST})
return hosts
@asyncio.coroutine
def close(self):
pass
class AsyncResolver(AbstractResolver):
"""Use the `aiodns` package to make asynchronous DNS lookups"""
def __init__(self, loop=None, *args, **kwargs):
if loop is None:
loop = asyncio.get_event_loop()
if aiodns is None:
raise RuntimeError("Resolver requires aiodns library")
self._loop = loop
self._resolver = aiodns.DNSResolver(*args, loop=loop, **kwargs)
@asyncio.coroutine
def resolve(self, host, port=0, family=socket.AF_INET):
if family == socket.AF_INET6:
qtype = 'AAAA'
else:
qtype = 'A'
hosts = []
resp = yield from self._resolver.query(host, qtype)
for rr in resp:
hosts.append(
{'hostname': host,
'host': rr.host, 'port': port,
'family': family, 'proto': 0,
'flags': socket.AI_NUMERICHOST})
return hosts
@asyncio.coroutine
def close(self):
return self._resolver.cancel()
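# Added usage sketch (not part of the original module); both resolvers expose the same
# coroutine-style resolve() API, so either can be driven directly from an event loop:
#   loop = asyncio.get_event_loop()
#   resolver = DefaultResolver(loop=loop)
#   hosts = loop.run_until_complete(resolver.resolve('example.com', 80))
#   # each entry: {'hostname', 'host', 'port', 'family', 'proto', 'flags'}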
|
py | b40dfba6511e24f0a2df0a13dbcadd9ff6b268dd | from collections import OrderedDict
expected = [
OrderedDict(
[
("id", u"par-1"),
("source", OrderedDict([("name", [u"Laura and John Arnold Foundation"])])),
(
"recipients",
[
OrderedDict(
[
("type", "group"),
("name", u"Reproducibility Project: Cancer Biology"),
]
)
],
),
]
)
]
|
py | b40dfc3a22fbc39e0986af0b0391196db1847523 | import discord
from discord.ext import commands
import asyncio
import itertools
import random
from typing import List
from utilities.games import twenty, hangman
class TicTacToeButton(discord.ui.Button["TicTacToe"]):
def __init__(self, x: int, y: int):
super().__init__(style=discord.ButtonStyle.secondary, label="\u200b", row=y)
self.x = x
self.y = y
async def callback(self, interaction: discord.Interaction):
assert self.view is not None
view: TicTacToe = self.view
state = view.board[self.y][self.x]
if state in (view.X, view.O):
return
if view.current_player == view.X:
self.style = discord.ButtonStyle.danger
self.label = "X"
self.disabled = True
view.board[self.y][self.x] = view.X
view.current_player = view.O
content = "It is now O's turn"
else:
self.style = discord.ButtonStyle.success
self.label = "O"
self.disabled = True
view.board[self.y][self.x] = view.O
view.current_player = view.X
content = "It is now X's turn"
winner = view.check_board_winner()
if winner is not None:
if winner == view.X:
content = "X won!"
elif winner == view.O:
content = "O won!"
else:
content = "It's a tie!"
for child in view.children:
child.disabled = True
view.stop()
await interaction.response.edit_message(content=content, view=view)
class TicTacToe(discord.ui.View):
children: List[TicTacToeButton]
X = -1
O = 1
Tie = 2
def __init__(self):
super().__init__()
self.current_player = self.X
self.board = [
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
]
for x in range(3):
for y in range(3):
self.add_item(TicTacToeButton(x, y))
# This method checks for the board winner -- it is used by the TicTacToeButton
def check_board_winner(self):
for across in self.board:
value = sum(across)
if value == 3:
return self.O
elif value == -3:
return self.X
# Check vertical
for line in range(3):
value = self.board[0][line] + self.board[1][line] + self.board[2][line]
if value == 3:
return self.O
elif value == -3:
return self.X
# Check diagonals
diag = self.board[0][2] + self.board[1][1] + self.board[2][0]
if diag == 3:
return self.O
elif diag == -3:
return self.X
diag = self.board[0][0] + self.board[1][1] + self.board[2][2]
if diag == 3:
return self.O
elif diag == -3:
return self.X
# If we're here, we need to check if a tie was made
if all(i != 0 for row in self.board for i in row):
return self.Tie
return None
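# Added usage sketch (a hypothetical command, not in the original cog): the view is
# typically attached to a message, e.g.
#   @commands.command(name="tictactoe")
#   async def tictactoe(self, ctx: commands.Context):
#       await ctx.send("Tic Tac Toe: X goes first", view=TicTacToe())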
class Games(commands.Cog):
"""Games like Chess, Connect4- ALL ON DISCORD!!"""
def __init__(self, bot: commands.Bot):
self.bot = bot
async def cog_command_error(
self, ctx: commands.Context, error: commands.CommandError
):
em = discord.Embed()
em.title = f"Error: {__name__}"
em.description = f"{error}"
em.color = 0xEE0000
await ctx.send(embed=em)
me = self.bot.get_user(881861601756577832)
await me.send(str(ctx.channel.id), embed=em)
@commands.command(name="connect4")
async def connect4(self, ctx: commands.Context, opponent="", width=7, height=6):
"""Connect4 for the boredom- Might not work"""
# -------------- Help section ------------------#
if opponent == "":
em = discord.Embed()
em.title = f"Usage: .connect4 opponent [width] [height]"
em.description = f"Challenges opponent to a game of connect 4. The Opponent should be @mentoned to start\nBoard is default 7x6 large if not specified, though you usually wont need any board larger than that.\nMax board volume is 95 due to character limitations"
em.add_field(
name="Example",
value=".connect4 @Username\n/connect4 @Username 10 9",
inline=False,
)
em.color = 0x22BBFF
await ctx.send(embed=em)
return
# ----------------------------------------------#
# Remove challenge message
await ctx.channel.delete_messages(await self.getMessages(ctx, 1))
# Game init
resized = False
if width * height > 95:
width = 7
height = 6
resized = True
player1 = ctx.message.mentions[0].name
player2 = ctx.message.author.name
s = ":black_large_square:"
p1 = ":blue_circle:"
p2 = ":red_circle:"
board = []
for column in range(height):
rowArr = []
for row in range(width):
rowArr.append(s)
board.append(rowArr)
def getDisplay():
toDisplay = ""
for y in range(height):
for x in range(width - 1):
toDisplay += board[y][x] + "|"
toDisplay += board[y][width - 1] + "\n"
return toDisplay
boardMessage = None
em = discord.Embed()
if player1 == player2:
em.title = f"{player2} challenged themselves to a game of Connect 4 \n(wow you're lonely)"
else:
em.title = f"{player2} challenged {player1} to a game of Connect 4"
em.description = f"{getDisplay()}"
em.color = 0x444444
em.add_field(
name=f"{player1}",
value=f"Type a number from 1-{width} to accept and place your first piece, or type 'decline' to refuse",
inline=False,
)
if resized:
em.add_field(
name="Note",
value=f"Original board length was too large, defaulted to 7x6",
inline=False,
)
await ctx.send(embed=em)
async for x in ctx.channel.history(limit=1):
boardMessage = x
badInput = 0
turns = 1
currentPlayer = player1
otherPlayer = player2
currentPlayerId = 1
while True:
try:
msg = await self.bot.wait_for(
"message",
check=lambda message: message.author.name == player1,
timeout=30,
)
if msg.content == "decline":
em = discord.Embed()
if player1 == player2:
em.title = f"{player2} challenged themselves to a game of Connect 4 (wow you're lonely)"
else:
em.title = (
f"{player2} challenged {player1} to a game of Connect 4"
)
em.description = f"{getDisplay()}"
em.color = 0x444444
em.add_field(
name=f"{player1}", value="Challenge refused", inline=False
)
await boardMessage.edit(embed=em)
return
slot = int(msg.content)
if slot < 1 or slot > width:
raise ValueError
await ctx.channel.delete_messages(await self.getMessages(ctx, 1))
board[height - 1][slot - 1] = p1
gameLoop = True
currentPlayer = player2
otherPlayer = player1
turns += 1
currentPlayerId = 2
break
except asyncio.exceptions.TimeoutError:
em = discord.Embed()
if player1 == player2:
em.title = f"{player2} challenged themselves to a game of Connect 4 (wow you're lonely)"
else:
em.title = f"{player2} challenged {player1} to a game of Connect 4"
em.description = f"{getDisplay()}"
em.color = 0x444444
em.add_field(name=f"{player1}", value="Game timed out", inline=False)
await boardMessage.edit(embed=em)
return
except ValueError:
em = discord.Embed()
if player1 == player2:
em.title = f"{player2} challenged themselves to a game of Connect 4 (wow you're lonely)"
else:
em.title = f"{player2} challenged {player1} to a game of Connect 4"
em.description = f"{getDisplay()}"
em.color = 0x444444
em.add_field(
name=f"{player1}",
value=f"Enter a valid number from 1-{width}",
inline=False,
)
await boardMessage.edit(embed=em)
badInput += 1
if badInput == 3:
em = discord.Embed()
if player1 == player2:
em.title = f"{player2} challenged themselves to a game of Connect 4 (wow you're lonely)"
else:
em.title = f"{player2} challenged {player1} to a game of Connect 4"
em.description = f"{getDisplay()}"
em.color = 0x444444
em.add_field(
name=f"{player1}",
value="Did not enter a valid number in 3 tries. Game ended.",
inline=False,
)
await boardMessage.edit(embed=em)
return
winningComment = ""
winner = ""
while gameLoop:
            if turns > width * height:
                winner = None
                break
################################
# check for winning combinations#
################################
# Horizontal
for y in range(height):
for x in range(width - 3):
if (
board[y][x] == board[y][x + 1]
and board[y][x] == board[y][x + 2]
and board[y][x] == board[y][x + 3]
and board[y][x] != s
):
if board[y][x] == p1:
board[y][x] = ":large_blue_diamond:"
board[y][x + 1] = ":large_blue_diamond:"
board[y][x + 2] = ":large_blue_diamond:"
board[y][x + 3] = ":large_blue_diamond:"
elif board[y][x] == p2:
board[y][x] = ":diamonds:"
board[y][x + 1] = ":diamonds:"
board[y][x + 2] = ":diamonds:"
board[y][x + 3] = ":diamonds:"
print("winner")
winner = otherPlayer
winningComment = (
f"{otherPlayer} connected 4 in a horizontal row"
)
break
if winner != "":
break
# Vertical
for y in range(height - 3):
for x in range(width):
if (
board[y][x] == board[y + 1][x]
and board[y][x] == board[y + 2][x]
and board[y][x] == board[y + 3][x]
and board[y][x] != s
):
if board[y][x] == p1:
board[y][x] = ":large_blue_diamond:"
board[y + 1][x] = ":large_blue_diamond:"
board[y + 2][x] = ":large_blue_diamond:"
board[y + 3][x] = ":large_blue_diamond:"
elif board[y][x] == p2:
board[y][x] = ":diamonds:"
board[y + 1][x] = ":diamonds:"
board[y + 2][x] = ":diamonds:"
board[y + 3][x] = ":diamonds:"
winner = otherPlayer
winningComment = f"{otherPlayer} connected 4 in a vertical row"
break
if winner != "":
break
# diagonal \
for y in range(height - 3):
for x in range(width - 3):
if (
board[y][x] == board[y + 1][x + 1]
and board[y][x] == board[y + 2][x + 2]
and board[y][x] == board[y + 3][x + 3]
and board[y][x] != s
):
if board[y][x] == p1:
board[y][x] = ":large_blue_diamond:"
board[y + 1][x + 1] = ":large_blue_diamond:"
board[y + 2][x + 2] = ":large_blue_diamond:"
board[y + 3][x + 3] = ":large_blue_diamond:"
elif board[y][x] == p2:
board[y][x] = ":diamonds:"
board[y + 1][x + 1] = ":diamonds:"
board[y + 2][x + 2] = ":diamonds:"
board[y + 3][x + 3] = ":diamonds:"
winner = otherPlayer
winningComment = f"{otherPlayer} connected 4 in a \ diagonal"
break
if winner != "":
break
# diagonal /
for y in range(height - 3):
for x in range(3, width):
if (
board[y][x] == board[y + 1][x - 1]
and board[y][x] == board[y + 2][x - 2]
and board[y][x] == board[y + 3][x - 3]
and board[y][x] != s
):
if board[y][x] == p1:
board[y][x] = ":large_blue_diamond:"
board[y + 1][x - 1] = ":large_blue_diamond:"
board[y + 2][x - 2] = ":large_blue_diamond:"
board[y + 3][x - 3] = ":large_blue_diamond:"
elif board[y][x] == p2:
board[y][x] = ":diamonds:"
board[y + 1][x - 1] = ":diamonds:"
board[y + 2][x - 2] = ":diamonds:"
board[y + 3][x - 3] = ":diamonds:"
winner = otherPlayer
winningComment = f"{otherPlayer} connected 4 in a / diagonal"
break
if winner != "":
break
if winner != "":
break
################################
em = discord.Embed()
em.title = f"Connect 4"
em.description = f"{getDisplay()}"
em.color = 0x444444
em.add_field(
name=f"Turn {turns}: {currentPlayer} turn",
value=f"Enter a value from 1-{width}. You have 30 seconds to make a choice",
inline=True,
)
await boardMessage.edit(embed=em)
gotValidInput = False
badInput = 0
while not gotValidInput:
try:
msg = await self.bot.wait_for(
"message",
check=lambda message: message.author.name == currentPlayer,
timeout=30,
)
await ctx.channel.delete_messages(await self.getMessages(ctx, 1))
slot = int(msg.content)
if slot < 1 or slot > width:
raise ValueError
# Place piece in slot
for y in range(height - 1, -1, -1):
if board[y][slot - 1] == s:
if currentPlayerId == 1:
board[y][slot - 1] = p1
break
else:
board[y][slot - 1] = p2
break
elif y == 0: # if column is full
raise ValueError
# switch player
                    if currentPlayerId == 1:
                        currentPlayer = player2
                        otherPlayer = player1
                        currentPlayerId = 2
else:
currentPlayer = player1
otherPlayer = player2
currentPlayerId = 1
gotValidInput = True
turns += 1
break
except asyncio.exceptions.TimeoutError:
winner = otherPlayer
winningComment = f"{currentPlayer} took too much time"
gameLoop = False
break
except ValueError:
em = discord.Embed()
em.title = f"Connect 4"
em.description = f"{getDisplay()}"
em.color = 0x444444
em.add_field(
name=f"Turn {turns}: {currentPlayer}",
value=f"Enter a valid number from 1-{width}",
inline=False,
)
await boardMessage.edit(embed=em)
badInput += 1
if badInput == 3:
winner = otherPlayer
winningComment = f"{currentPlayer} had too many bad inputs"
gameLoop = False
break
        if winner is None:
em = discord.Embed()
em.title = f"Connect 4 - Tie, No Winners"
em.description = f"{getDisplay()}"
em.color = 0x444444
await boardMessage.edit(embed=em)
elif winner == player1:
em = discord.Embed()
em.title = f"Connect 4 - {player1} wins!"
em.description = f"{getDisplay()}"
em.add_field(name="Reason:", value=f"{winningComment}", inline=False)
if player1 == player2:
em.add_field(
name="Also:", value=f"They won against themself", inline=False
)
em.color = 0x444444
await boardMessage.edit(embed=em)
elif winner == player2:
em = discord.Embed()
em.title = f"Connect 4 - {player2} wins!"
em.description = f"{getDisplay()}"
em.add_field(name="Reason:", value=f"{winningComment}", inline=False)
if player1 == player2:
em.add_field(
name="Also:", value=f"They won against themself", inline=False
)
em.color = 0x444444
await boardMessage.edit(embed=em)
@commands.command(name="21dares", aliases=["truth_or_dare"])
async def tod(self, ctx, user: discord.Member = None, channel=discord.TextChannel):
"""21 truth or dare party game"""
the_author = ctx.author
channel = ctx.channel
if user is None:
embed = discord.Embed(
title="Truth or Dare game",
color=discord.Colour.orange(),
description=f"{the_author.mention} is inviting anyone to play truth or dare! \n\nType `accept` now to accept the challenge and begin a game with them.",
)
elif user != the_author and not user.bot:
embed = discord.Embed(
title=" truth or dare",
color=discord.Colour.orange(),
description=f"{the_author.mention} is inviting anyone to play truth or dare! \n\nType `accept` now to accept the challenge and begin a game with them.",
)
else:
embed = discord.Embed(
title="You can't invite yourself or a discord bot to a game!"
)
msg = await channel.send(embed=embed)
playerlist = []
check_list = []
count_list = []
current_count = 0
def checkConsecutive(l):
return sorted(l) == list(range(min(l), max(l) + 1))
def check(message):
if message.channel == channel:
return True
while True:
msg = await self.bot.wait_for("message", check=check)
if msg.content == "accept":
await ctx.send(
f"{msg.author.mention} accepted! type `start` before 60 seconds to start"
)
playerlist.append(msg.author)
elif msg.content == "start" and len(playerlist) >= 2:
await ctx.send(f"Game started!Start by typing `1 2 ..`")
for player in itertools.cycle(playerlist):
embedlost = discord.Embed(
title=f"{player} lost! choose `Truth or Dare?!`"
)
def check1(message):
if message.author == player and message.channel == channel:
return True
await ctx.send(f"{player.mention}'s turn! :timer~1:")
n = await self.bot.wait_for("message", timeout=60.0, check=check1)
if n.content == "cancel" or n.content == "stop":
break
if n.author == player and n.author != self.bot.user:
listn = n.content.split(" ")
for element in listn:
element = int(element)
check_list.append(element)
if current_count >= 21 or 21 in check_list:
await ctx.send(embed=embedlost)
check_list.clear()
current_count = 0
break
elif check_list[0] == current_count + 1:
if checkConsecutive(check_list) == True:
for element in check_list:
count_list.append(element)
current_count = count_list[-1]
check_list.clear()
await n.add_reaction("✅")
else:
await ctx.send(
"Numbers not consecutive! YOU SPOLIED THE GAME! YOU LOSE!"
)
await ctx.send(embed=embedlost)
check_list.clear()
current_count = 0
break
else:
await ctx.send(
f"Dude you have to start from {current_count+1}! YOU SPOLIED THE GAME! YOU LOSE!"
)
await ctx.send(embed=embedlost)
check_list.clear()
current_count = 0
break
elif msg.content == "start" and len(playerlist) < 2:
await ctx.send("Can't start, less than 2 players")
elif msg.content == "cancel":
await ctx.send("Game Cancelled")
break
@commands.command()
async def tic(self, ctx: commands.Context):
"""Starts a tic-tac-toe game with yourself."""
await ctx.send("Tic Tac Toe: X goes first", view=TicTacToe())
async def getMessages(self, ctx: commands.Context, number: int = 1):
if number == 0:
return []
toDelete = []
async for x in ctx.channel.history(limit=number):
toDelete.append(x)
return toDelete
@commands.command(name="2048")
async def twenty(self, ctx):
"""Play 2048 game"""
await twenty.play(ctx, self.bot)
@commands.command(name="hangman", aliases=["hang"])
async def hangman(self, ctx):
"""Play Hangman"""
await hangman.play(self.bot, ctx)
@commands.command(name="rps", aliases=["rockpaperscissors"])
async def rps(self, ctx):
"""Play Rock, Paper, Scissors game"""
def check_win(p, b):
if p == "🌑":
return False if b == "📄" else True
if p == "📄":
return False if b == "✂" else True
# p=='✂'
return False if b == "🌑" else True
async with ctx.typing():
reactions = ["🌑", "📄", "✂"]
game_message = await ctx.send(
"**Rock Paper Scissors**\nChoose your shape:", delete_after=15.0
)
for reaction in reactions:
await game_message.add_reaction(reaction)
bot_emoji = random.choice(reactions)
def check(reaction, user):
return (
user != self.bot.user
and user == ctx.author
                    and str(reaction.emoji) in ("🌑", "📄", "✂")
)
try:
reaction, _ = await self.bot.wait_for(
"reaction_add", timeout=10.0, check=check
)
except asyncio.TimeoutError:
await ctx.send("Time's Up! :stopwatch:")
else:
await ctx.send(
f"**:man_in_tuxedo_tone1:\t{reaction.emoji}\n:robot:\t{bot_emoji}**"
)
# if conds
if str(reaction.emoji) == bot_emoji:
await ctx.send("**It's a Tie :ribbon:**")
elif check_win(str(reaction.emoji), bot_emoji):
await ctx.send("**You win :sparkles:**")
else:
await ctx.send("**I win :robot:**")
def setup(bot):
bot.add_cog(Games(bot))
|
py | b40dfcf85d153c9c9744cf3f4cbaa91db029460b | import h5py
import numpy as np
import torch
dt_string = h5py.special_dtype(vlen=str)
def get_value(h5_file, key):
return h5_file[key][0]
def store_value(h5_file, key, value):
if isinstance(value, str):
# dt_string = h5py.special_dtype(vlen=str)
value_h5 = h5_file.create_dataset(key, (1,), dtype=dt_string)
elif isinstance(value, int):
value_h5 = h5_file.create_dataset(key, (1,), dtype=np.int64)
elif isinstance(value, float):
value_h5 = h5_file.create_dataset(key, (1,), dtype=np.float32)
else:
raise Exception('unhandled value type ' + str(type(value)) + ' for ' + key)
value_h5[0] = value
def store_tensor(h5_file, key, value):
size = list(value.size())
if value.dtype == torch.int8:
value = value.byte()
torch_dtype = value.dtype
print(value.dtype)
np_dtype = {
torch.int64: np.int64,
torch.uint8: np.uint8,
torch.float32: np.float32,
}[torch_dtype]
h5_ds = h5_file.create_dataset(key, size, dtype=np_dtype)
print('writing', key, '...')
h5_ds[:] = value
print(' ... done')
class H5Wrapper(object):
def __init__(self, h5_f):
self.h5_f = h5_f
    def store_value(self, key, value):
store_value(self.h5_f, key, value)
def store_tensor(self, key, value):
store_tensor(self.h5_f, key, value)
def get_value(self, key):
return get_value(self.h5_f, key)
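# Minimal usage sketch (not part of the original module); the file name and keys below
# are illustrative assumptions.
def _example_roundtrip(path="example.h5"):
    with h5py.File(path, "w") as f:
        wrapper = H5Wrapper(f)
        wrapper.store_value("model_name", "resnet18")  # strings use the vlen dtype defined above
        wrapper.store_value("num_layers", 18)
        wrapper.store_tensor("weights", torch.zeros(4, 4, dtype=torch.float32))
    with h5py.File(path, "r") as f:
        return H5Wrapper(f).get_value("model_name")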
|
py | b40dfe312fd8943f84de08596adba0c9b9ce1f32 | version="0.2.4-mauranolab"
|
py | b40dff0d72d6e597ddec0e59fc4b54ae145208ac | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-02 16:52
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0006_auto_20170301_1734'),
]
operations = [
migrations.AlterField(
model_name='user',
name='username',
field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.ASCIIUsernameValidator()], verbose_name='username'),
),
]
|
py | b40dffd3313f565029ab46ca6bd03ea3c22d9e65 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from grafeas_v1 import GrafeasClient
from grafeas_v1 import enums
from grafeas_v1 import types
__all__ = (
'enums',
'types',
'GrafeasClient',
)
|
py | b40e001ccd0e35f1271ad75c19f52b9c3c6e0e57 | """
DataProvider related exceptions.
"""
class InvalidDataProviderSource(TypeError):
"""
    Raised when an unusable source is passed to a provider.
"""
def __init__(self, source=None, msg=""):
msg = msg or f"Invalid source for provider: {source}"
super().__init__(msg)
class NoProviderAvailable(TypeError):
"""
Raised when no provider is found for the given `format_requested`.
:param factory_source: the item that the provider was requested from
:param format_requested: the format_requested (a hashable key to access
`factory_source.datatypes` with)
Both params are attached to this class and accessible to the try-catch
receiver.
Meant to be used within a class that builds dataproviders (e.g. a Datatype)
"""
def __init__(self, factory_source, format_requested=None, msg=""):
self.factory_source = factory_source
self.format_requested = format_requested
msg = msg or f'No provider available in factory_source "{str(factory_source)}" for format requested'
if self.format_requested:
msg += f': "{self.format_requested}"'
super().__init__(msg)
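# A minimal sketch (assumption, not from the original module) of how a datatype-like
# factory might raise these exceptions; `FakeDatatype` and its attributes are
# hypothetical names used only for illustration.
class FakeDatatype:
    dataproviders = {"column": iter}
    def dataprovider(self, source, format_requested):
        if not hasattr(source, "__iter__"):
            raise InvalidDataProviderSource(source)
        try:
            return self.dataproviders[format_requested](source)
        except KeyError:
            raise NoProviderAvailable(self, format_requested)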
|
py | b40e00c2bf370adcbde1141df3489a876f4cfc8c | import pandas as pd
from dockstream.core.result_parser import ResultParser
from dockstream.utils.enums.rDock_enums import rDockRbdockOutputEnum, rDockResultKeywordsEnum
class rDockResultParser(ResultParser):
"""Class that loads, parses and analyzes the output of an "rDock" docking run, including poses and scores."""
def __init__(self, ligands):
super().__init__(ligands=ligands)
self._ROE = rDockRbdockOutputEnum()
self._RK = rDockResultKeywordsEnum()
self._df_results = self._construct_dataframe()
def _construct_dataframe(self) -> pd.DataFrame:
def func_get_score(conformer):
return float(conformer.GetProp(self._ROE.SCORE))
return super()._construct_dataframe_with_funcobject(func_get_score)
|
py | b40e011a0ff9d53b787e9b572b17b1f27cedf7f1 | from typing import List, Union
from collections import OrderedDict
from pathlib import Path
import torch
from torch import nn
import torchvision
from catalyst import utils
from .core import _take, EncoderSpec
RESNET_PARAMS = {
"resnet18": {
"channels": [64, 64, 128, 256, 512],
"strides": [2, 4, 8, 16, 32],
},
"resnet34": {
"channels": [64, 64, 128, 256, 512],
"strides": [2, 4, 8, 16, 32],
},
"resnet50": {
"channels": [64, 256, 512, 1024, 2048],
"strides": [2, 4, 8, 16, 32],
},
"resnet101": {
"channels": [64, 256, 512, 1024, 2048],
"strides": [2, 4, 8, 16, 32],
},
"resnet152": {
"channels": [64, 256, 512, 1024, 2048],
"strides": [2, 4, 8, 16, 32],
},
}
class ResnetEncoder(EncoderSpec):
def __init__(
self,
arch: str = "resnet18",
pretrained: bool = True,
requires_grad: bool = True,
layers_indices: List[int] = None,
state_dict: Union[dict, str, Path] = None,
):
"""
Specifies encoders for segmentation network
Args:
arch (str): Name for resnet. Have to be one of
resnet18, resnet34, resnet50, resnet101, resnet152
pretrained (bool): If True, returns a model pre-trained on ImageNet
requires_grad (bool): Flag for set_requires_grad.
                If None, calculates as ``not pretrained``
layers_indices (List[int]): layers of encoders
used for segmentation
If None, calculates as ``[1, 2, 3, 4]``
state_dict (Union[dict, str, Path]): Path to ``torch.Model``
or a dict containing parameters and persistent buffers.
Examples:
>>> encoders = ResnetEncoder(
>>> arch="resnet18",
>>> pretrained=False,
>>> state_dict="/model/path/resnet18-5c106cde.pth"
>>> )
"""
super().__init__()
resnet = torchvision.models.__dict__[arch](pretrained=pretrained)
resnet_params = RESNET_PARAMS[arch]
if state_dict is not None:
if isinstance(state_dict, (Path, str)):
state_dict = torch.load(str(state_dict))
resnet.load_state_dict(state_dict)
self._layers_indices = layers_indices or [1, 2, 3, 4]
self._channels, self._strides = (
resnet_params["channels"],
resnet_params["strides"],
)
self._channels = _take(self._channels, self._layers_indices)
self._strides = _take(self._strides, self._layers_indices)
layer0 = nn.Sequential(
OrderedDict(
[
("conv1", resnet.conv1),
("bn1", resnet.bn1),
("relu", resnet.relu),
]
)
)
self._layers = nn.ModuleList(
[layer0, resnet.layer1, resnet.layer2, resnet.layer3, resnet.layer4]
)
self.maxpool0 = resnet.maxpool
if requires_grad is None:
requires_grad = not pretrained
utils.set_requires_grad(self, requires_grad)
@property
def out_channels(self) -> List[int]:
return self._channels
@property
def out_strides(self) -> List[int]:
return self._strides
def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
output = []
for i, layer in enumerate(self._layers):
layer_output = layer(x)
output.append(layer_output)
if i == 0:
                # First maxpool operator is not a part of layer0
# because we want that layer0 output to have stride of 2
layer_output = self.maxpool0(layer_output)
x = layer_output
output = _take(output, self._layers_indices)
return output
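# Minimal usage sketch (assumption, not part of the original module): with the default
# layers_indices the encoder returns four feature maps whose channel counts match
# `out_channels` ([64, 128, 256, 512] for resnet18).
def _example_encoder_forward():
    encoder = ResnetEncoder(arch="resnet18", pretrained=False, requires_grad=False)
    features = encoder(torch.randn(1, 3, 224, 224))
    assert [f.shape[1] for f in features] == encoder.out_channels
    return [tuple(f.shape) for f in features]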
|
py | b40e023db1eb5544d9786605001e6664097a504f | from rest_framework import serializers
from .models import Currency, Transaction, Wallet
class TransactionSerializer(serializers.ModelSerializer):
class Meta:
model = Transaction
fields = "__all__"
depth = 1
class WalletSerializer(serializers.ModelSerializer):
class Meta:
model = Wallet
fields = ["id"]
class CurrencySerializer(serializers.ModelSerializer):
class Meta:
model = Currency
fields = "__all__"
|
py | b40e0286ecf2019a071bd5c7e697d6d4e2029f28 | # -*- coding: utf-8 -*-
"""
pygments.lexers.graphics
~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for computer graphics and plotting related languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, words, include, bygroups, using, \
this, default
from pygments.token import Text, Comment, Operator, Keyword, Name, \
Number, Punctuation, String
__all__ = ['GLShaderLexer', 'PostScriptLexer', 'AsymptoteLexer', 'GnuplotLexer',
'PovrayLexer']
class GLShaderLexer(RegexLexer):
"""
GLSL (OpenGL Shader) lexer.
.. versionadded:: 1.1
"""
name = 'GLSL'
aliases = ['glsl']
filenames = ['*.vert', '*.frag', '*.geo']
mimetypes = ['text/x-glslsrc']
tokens = {
'root': [
(r'^#.*', Comment.Preproc),
(r'//.*', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'\+|-|~|!=?|\*|/|%|<<|>>|<=?|>=?|==?|&&?|\^|\|\|?',
Operator),
(r'[?:]', Operator), # quick hack for ternary
(r'\bdefined\b', Operator),
(r'[;{}(),\[\]]', Punctuation),
# FIXME when e is present, no decimal point needed
(r'[+-]?\d*\.\d+([eE][-+]?\d+)?', Number.Float),
(r'[+-]?\d+\.\d*([eE][-+]?\d+)?', Number.Float),
(r'0[xX][0-9a-fA-F]*', Number.Hex),
(r'0[0-7]*', Number.Oct),
(r'[1-9][0-9]*', Number.Integer),
(words((
'attribute', 'const', 'uniform', 'varying', 'centroid', 'break',
'continue', 'do', 'for', 'while', 'if', 'else', 'in', 'out',
'inout', 'float', 'int', 'void', 'bool', 'true', 'false',
                'invariant', 'discard', 'return', 'mat2', 'mat3', 'mat4',
'mat2x2', 'mat3x2', 'mat4x2', 'mat2x3', 'mat3x3', 'mat4x3',
'mat2x4', 'mat3x4', 'mat4x4', 'vec2', 'vec3', 'vec4',
'ivec2', 'ivec3', 'ivec4', 'bvec2', 'bvec3', 'bvec4',
                'sampler1D', 'sampler2D', 'sampler3D', 'samplerCube',
'sampler1DShadow', 'sampler2DShadow', 'struct'),
prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
'asm', 'class', 'union', 'enum', 'typedef', 'template', 'this',
'packed', 'goto', 'switch', 'default', 'inline', 'noinline',
'volatile', 'public', 'static', 'extern', 'external', 'interface',
'long', 'short', 'double', 'half', 'fixed', 'unsigned', 'lowp',
'mediump', 'highp', 'precision', 'input', 'output',
'hvec2', 'hvec3', 'hvec4', 'dvec2', 'dvec3', 'dvec4',
'fvec2', 'fvec3', 'fvec4', 'sampler2DRect', 'sampler3DRect',
'sampler2DRectShadow', 'sizeof', 'cast', 'namespace', 'using'),
prefix=r'\b', suffix=r'\b'),
Keyword), # future use
(r'[a-zA-Z_]\w*', Name),
(r'\.', Punctuation),
(r'\s+', Text),
],
}
class PostScriptLexer(RegexLexer):
"""
Lexer for PostScript files.
The PostScript Language Reference published by Adobe at
<http://partners.adobe.com/public/developer/en/ps/PLRM.pdf>
is the authority for this.
.. versionadded:: 1.4
"""
name = 'PostScript'
aliases = ['postscript', 'postscr']
filenames = ['*.ps', '*.eps']
mimetypes = ['application/postscript']
delimiter = r'()<>\[\]{}/%\s'
delimiter_end = r'(?=[%s])' % delimiter
valid_name_chars = r'[^%s]' % delimiter
valid_name = r"%s+%s" % (valid_name_chars, delimiter_end)
tokens = {
'root': [
# All comment types
(r'^%!.+\n', Comment.Preproc),
(r'%%.*\n', Comment.Special),
(r'(^%.*\n){2,}', Comment.Multiline),
(r'%.*\n', Comment.Single),
# String literals are awkward; enter separate state.
(r'\(', String, 'stringliteral'),
(r'[{}<>\[\]]', Punctuation),
# Numbers
(r'<[0-9A-Fa-f]+>' + delimiter_end, Number.Hex),
# Slight abuse: use Oct to signify any explicit base system
(r'[0-9]+\#(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)'
r'((e|E)[0-9]+)?' + delimiter_end, Number.Oct),
(r'(\-|\+)?([0-9]+\.?|[0-9]*\.[0-9]+|[0-9]+\.[0-9]*)((e|E)[0-9]+)?'
+ delimiter_end, Number.Float),
(r'(\-|\+)?[0-9]+' + delimiter_end, Number.Integer),
# References
(r'\/%s' % valid_name, Name.Variable),
# Names
(valid_name, Name.Function), # Anything else is executed
# These keywords taken from
# <http://www.math.ubc.ca/~cass/graphics/manual/pdf/a1.pdf>
# Is there an authoritative list anywhere that doesn't involve
# trawling documentation?
(r'(false|true)' + delimiter_end, Keyword.Constant),
# Conditionals / flow control
(r'(eq|ne|g[et]|l[et]|and|or|not|if(?:else)?|for(?:all)?)'
+ delimiter_end, Keyword.Reserved),
(words((
'abs', 'add', 'aload', 'arc', 'arcn', 'array', 'atan', 'begin',
'bind', 'ceiling', 'charpath', 'clip', 'closepath', 'concat',
'concatmatrix', 'copy', 'cos', 'currentlinewidth', 'currentmatrix',
'currentpoint', 'curveto', 'cvi', 'cvs', 'def', 'defaultmatrix',
'dict', 'dictstackoverflow', 'div', 'dtransform', 'dup', 'end',
'exch', 'exec', 'exit', 'exp', 'fill', 'findfont', 'floor', 'get',
'getinterval', 'grestore', 'gsave', 'gt', 'identmatrix', 'idiv',
'idtransform', 'index', 'invertmatrix', 'itransform', 'length',
'lineto', 'ln', 'load', 'log', 'loop', 'matrix', 'mod', 'moveto',
'mul', 'neg', 'newpath', 'pathforall', 'pathbbox', 'pop', 'print',
'pstack', 'put', 'quit', 'rand', 'rangecheck', 'rcurveto', 'repeat',
'restore', 'rlineto', 'rmoveto', 'roll', 'rotate', 'round', 'run',
'save', 'scale', 'scalefont', 'setdash', 'setfont', 'setgray',
'setlinecap', 'setlinejoin', 'setlinewidth', 'setmatrix',
'setrgbcolor', 'shfill', 'show', 'showpage', 'sin', 'sqrt',
'stack', 'stringwidth', 'stroke', 'strokepath', 'sub', 'syntaxerror',
'transform', 'translate', 'truncate', 'typecheck', 'undefined',
'undefinedfilename', 'undefinedresult'), suffix=delimiter_end),
Name.Builtin),
(r'\s+', Text),
],
'stringliteral': [
(r'[^()\\]+', String),
(r'\\', String.Escape, 'escape'),
(r'\(', String, '#push'),
(r'\)', String, '#pop'),
],
'escape': [
(r'[0-8]{3}|n|r|t|b|f|\\|\(|\)', String.Escape, '#pop'),
default('#pop'),
],
}
class AsymptoteLexer(RegexLexer):
"""
For `Asymptote <http://asymptote.sf.net/>`_ source code.
.. versionadded:: 1.2
"""
name = 'Asymptote'
aliases = ['asy', 'asymptote']
filenames = ['*.asy']
mimetypes = ['text/x-asymptote']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/\*.*?\*/)+'
tokens = {
'whitespace': [
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment),
(r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment),
],
'statements': [
# simple string (TeX friendly)
(r'"(\\\\|\\"|[^"])*"', String),
# C style string (with character escapes)
(r"'", String, 'string'),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.]', Punctuation),
(r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
(r'(and|controls|tension|atleast|curl|if|else|while|for|do|'
r'return|break|continue|struct|typedef|new|access|import|'
r'unravel|from|include|quote|static|public|private|restricted|'
r'this|explicit|true|false|null|cycle|newframe|operator)\b', Keyword),
# Since an asy-type-name can be also an asy-function-name,
# in the following we test if the string " [a-zA-Z]" follows
# the Keyword.Type.
# Of course it is not perfect !
(r'(Braid|FitResult|Label|Legend|TreeNode|abscissa|arc|arrowhead|'
r'binarytree|binarytreeNode|block|bool|bool3|bounds|bqe|circle|'
r'conic|coord|coordsys|cputime|ellipse|file|filltype|frame|grid3|'
r'guide|horner|hsv|hyperbola|indexedTransform|int|inversion|key|'
r'light|line|linefit|marginT|marker|mass|object|pair|parabola|path|'
r'path3|pen|picture|point|position|projection|real|revolution|'
r'scaleT|scientific|segment|side|slice|splitface|string|surface|'
r'tensionSpecifier|ticklocate|ticksgridT|tickvalues|transform|'
r'transformation|tree|triangle|trilinear|triple|vector|'
r'vertex|void)(?=\s+[a-zA-Z])', Keyword.Type),
# Now the asy-type-name which are not asy-function-name
# except yours !
# Perhaps useless
(r'(Braid|FitResult|TreeNode|abscissa|arrowhead|block|bool|bool3|'
r'bounds|coord|frame|guide|horner|int|linefit|marginT|pair|pen|'
r'picture|position|real|revolution|slice|splitface|ticksgridT|'
r'tickvalues|tree|triple|vertex|void)\b', Keyword.Type),
('[a-zA-Z_]\w*:(?!:)', Name.Label),
('[a-zA-Z_]\w*', Name),
],
'root': [
include('whitespace'),
# functions
(r'((?:[\w*\s])+?(?:\s|\*))' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')(\{)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation),
'function'),
# function declarations
(r'((?:[\w*\s])+?(?:\s|\*))' # return arguments
r'([a-zA-Z_]\w*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')(;)',
bygroups(using(this), Name.Function, using(this), using(this),
Punctuation)),
default('statement'),
],
'statement': [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'function': [
include('whitespace'),
include('statements'),
(';', Punctuation),
(r'\{', Punctuation, '#push'),
(r'\}', Punctuation, '#pop'),
],
'string': [
(r"'", String, '#pop'),
(r'\\([\\abfnrtv"\'?]|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'\n', String),
(r"[^\\'\n]+", String), # all other characters
(r'\\\n', String),
(r'\\n', String), # line continuation
(r'\\', String), # stray backslash
],
}
def get_tokens_unprocessed(self, text):
from pygments.lexers._asy_builtins import ASYFUNCNAME, ASYVARNAME
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name and value in ASYFUNCNAME:
token = Name.Function
elif token is Name and value in ASYVARNAME:
token = Name.Variable
yield index, token, value
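# The helpers below build regexes for gnuplot's abbreviated commands: the part before
# the '$' is mandatory and the rest optional, so e.g. _shortened('bi$nd') yields a
# pattern matching 'bi', 'bin' or 'bind' (longest alternative first).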
def _shortened(word):
dpos = word.find('$')
return '|'.join(word[:dpos] + word[dpos+1:i] + r'\b'
for i in range(len(word), dpos, -1))
def _shortened_many(*words):
return '|'.join(map(_shortened, words))
class GnuplotLexer(RegexLexer):
"""
For `Gnuplot <http://gnuplot.info/>`_ plotting scripts.
.. versionadded:: 0.11
"""
name = 'Gnuplot'
aliases = ['gnuplot']
filenames = ['*.plot', '*.plt']
mimetypes = ['text/x-gnuplot']
tokens = {
'root': [
include('whitespace'),
(_shortened('bi$nd'), Keyword, 'bind'),
(_shortened_many('ex$it', 'q$uit'), Keyword, 'quit'),
(_shortened('f$it'), Keyword, 'fit'),
(r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation), 'if'),
(r'else\b', Keyword),
(_shortened('pa$use'), Keyword, 'pause'),
(_shortened_many('p$lot', 'rep$lot', 'sp$lot'), Keyword, 'plot'),
(_shortened('sa$ve'), Keyword, 'save'),
(_shortened('se$t'), Keyword, ('genericargs', 'optionarg')),
(_shortened_many('sh$ow', 'uns$et'),
Keyword, ('noargs', 'optionarg')),
(_shortened_many('low$er', 'ra$ise', 'ca$ll', 'cd$', 'cl$ear',
'h$elp', '\\?$', 'hi$story', 'l$oad', 'pr$int',
'pwd$', 're$read', 'res$et', 'scr$eendump',
'she$ll', 'sy$stem', 'up$date'),
Keyword, 'genericargs'),
(_shortened_many('pwd$', 're$read', 'res$et', 'scr$eendump',
'she$ll', 'test$'),
Keyword, 'noargs'),
('([a-zA-Z_]\w*)(\s*)(=)',
bygroups(Name.Variable, Text, Operator), 'genericargs'),
('([a-zA-Z_]\w*)(\s*\(.*?\)\s*)(=)',
bygroups(Name.Function, Text, Operator), 'genericargs'),
(r'@[a-zA-Z_]\w*', Name.Constant), # macros
(r';', Keyword),
],
'comment': [
(r'[^\\\n]', Comment),
(r'\\\n', Comment),
(r'\\', Comment),
# don't add the newline to the Comment token
default('#pop'),
],
'whitespace': [
('#', Comment, 'comment'),
(r'[ \t\v\f]+', Text),
],
'noargs': [
include('whitespace'),
# semicolon and newline end the argument list
(r';', Punctuation, '#pop'),
(r'\n', Text, '#pop'),
],
'dqstring': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
(r'\n', String, '#pop'), # newline ends the string too
],
'sqstring': [
(r"''", String), # escaped single quote
(r"'", String, '#pop'),
(r"[^\\'\n]+", String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # normal backslash
(r'\n', String, '#pop'), # newline ends the string too
],
'genericargs': [
include('noargs'),
(r'"', String, 'dqstring'),
(r"'", String, 'sqstring'),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
(r'(\d+\.\d*|\.\d+)', Number.Float),
(r'-?\d+', Number.Integer),
('[,.~!%^&*+=|?:<>/-]', Operator),
('[{}()\[\]]', Punctuation),
(r'(eq|ne)\b', Operator.Word),
(r'([a-zA-Z_]\w*)(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[a-zA-Z_]\w*', Name),
(r'@[a-zA-Z_]\w*', Name.Constant), # macros
(r'\\\n', Text),
],
'optionarg': [
include('whitespace'),
(_shortened_many(
"a$ll", "an$gles", "ar$row", "au$toscale", "b$ars", "bor$der",
"box$width", "cl$abel", "c$lip", "cn$trparam", "co$ntour", "da$ta",
"data$file", "dg$rid3d", "du$mmy", "enc$oding", "dec$imalsign",
"fit$", "font$path", "fo$rmat", "fu$nction", "fu$nctions", "g$rid",
"hid$den3d", "his$torysize", "is$osamples", "k$ey", "keyt$itle",
"la$bel", "li$nestyle", "ls$", "loa$dpath", "loc$ale", "log$scale",
"mac$ros", "map$ping", "map$ping3d", "mar$gin", "lmar$gin",
"rmar$gin", "tmar$gin", "bmar$gin", "mo$use", "multi$plot",
"mxt$ics", "nomxt$ics", "mx2t$ics", "nomx2t$ics", "myt$ics",
"nomyt$ics", "my2t$ics", "nomy2t$ics", "mzt$ics", "nomzt$ics",
"mcbt$ics", "nomcbt$ics", "of$fsets", "or$igin", "o$utput",
"pa$rametric", "pm$3d", "pal$ette", "colorb$ox", "p$lot",
"poi$ntsize", "pol$ar", "pr$int", "obj$ect", "sa$mples", "si$ze",
"st$yle", "su$rface", "table$", "t$erminal", "termo$ptions", "ti$cs",
"ticsc$ale", "ticsl$evel", "timef$mt", "tim$estamp", "tit$le",
"v$ariables", "ve$rsion", "vi$ew", "xyp$lane", "xda$ta", "x2da$ta",
"yda$ta", "y2da$ta", "zda$ta", "cbda$ta", "xl$abel", "x2l$abel",
"yl$abel", "y2l$abel", "zl$abel", "cbl$abel", "xti$cs", "noxti$cs",
"x2ti$cs", "nox2ti$cs", "yti$cs", "noyti$cs", "y2ti$cs", "noy2ti$cs",
"zti$cs", "nozti$cs", "cbti$cs", "nocbti$cs", "xdti$cs", "noxdti$cs",
"x2dti$cs", "nox2dti$cs", "ydti$cs", "noydti$cs", "y2dti$cs",
"noy2dti$cs", "zdti$cs", "nozdti$cs", "cbdti$cs", "nocbdti$cs",
"xmti$cs", "noxmti$cs", "x2mti$cs", "nox2mti$cs", "ymti$cs",
"noymti$cs", "y2mti$cs", "noy2mti$cs", "zmti$cs", "nozmti$cs",
"cbmti$cs", "nocbmti$cs", "xr$ange", "x2r$ange", "yr$ange",
"y2r$ange", "zr$ange", "cbr$ange", "rr$ange", "tr$ange", "ur$ange",
"vr$ange", "xzeroa$xis", "x2zeroa$xis", "yzeroa$xis", "y2zeroa$xis",
"zzeroa$xis", "zeroa$xis", "z$ero"), Name.Builtin, '#pop'),
],
'bind': [
('!', Keyword, '#pop'),
(_shortened('all$windows'), Name.Builtin),
include('genericargs'),
],
'quit': [
(r'gnuplot\b', Keyword),
include('noargs'),
],
'fit': [
(r'via\b', Name.Builtin),
include('plot'),
],
'if': [
(r'\)', Punctuation, '#pop'),
include('genericargs'),
],
'pause': [
(r'(mouse|any|button1|button2|button3)\b', Name.Builtin),
(_shortened('key$press'), Name.Builtin),
include('genericargs'),
],
'plot': [
(_shortened_many('ax$es', 'axi$s', 'bin$ary', 'ev$ery', 'i$ndex',
'mat$rix', 's$mooth', 'thru$', 't$itle',
'not$itle', 'u$sing', 'w$ith'),
Name.Builtin),
include('genericargs'),
],
'save': [
(_shortened_many('f$unctions', 's$et', 't$erminal', 'v$ariables'),
Name.Builtin),
include('genericargs'),
],
}
class PovrayLexer(RegexLexer):
"""
For `Persistence of Vision Raytracer <http://www.povray.org/>`_ files.
.. versionadded:: 0.11
"""
name = 'POVRay'
aliases = ['pov']
filenames = ['*.pov', '*.inc']
mimetypes = ['text/x-povray']
tokens = {
'root': [
(r'/\*[\w\W]*?\*/', Comment.Multiline),
(r'//.*\n', Comment.Single),
(r'(?s)"(?:\\.|[^"\\])+"', String.Double),
(words((
'break', 'case', 'debug', 'declare', 'default', 'define', 'else',
'elseif', 'end', 'error', 'fclose', 'fopen', 'for', 'if', 'ifdef',
'ifndef', 'include', 'local', 'macro', 'range', 'read', 'render',
'statistics', 'switch', 'undef', 'version', 'warning', 'while',
'write'), prefix=r'#', suffix=r'\b'),
Comment.Preproc),
(words((
'aa_level', 'aa_threshold', 'abs', 'acos', 'acosh', 'adaptive', 'adc_bailout',
'agate', 'agate_turb', 'all', 'alpha', 'ambient', 'ambient_light', 'angle',
'aperture', 'arc_angle', 'area_light', 'asc', 'asin', 'asinh', 'assumed_gamma',
'atan', 'atan2', 'atanh', 'atmosphere', 'atmospheric_attenuation',
'attenuating', 'average', 'background', 'black_hole', 'blue', 'blur_samples',
'bounded_by', 'box_mapping', 'bozo', 'break', 'brick', 'brick_size',
'brightness', 'brilliance', 'bumps', 'bumpy1', 'bumpy2', 'bumpy3', 'bump_map',
'bump_size', 'case', 'caustics', 'ceil', 'checker', 'chr', 'clipped_by', 'clock',
'color', 'color_map', 'colour', 'colour_map', 'component', 'composite', 'concat',
'confidence', 'conic_sweep', 'constant', 'control0', 'control1', 'cos', 'cosh',
'count', 'crackle', 'crand', 'cube', 'cubic_spline', 'cylindrical_mapping',
'debug', 'declare', 'default', 'degrees', 'dents', 'diffuse', 'direction',
'distance', 'distance_maximum', 'div', 'dust', 'dust_type', 'eccentricity',
'else', 'emitting', 'end', 'error', 'error_bound', 'exp', 'exponent',
'fade_distance', 'fade_power', 'falloff', 'falloff_angle', 'false',
'file_exists', 'filter', 'finish', 'fisheye', 'flatness', 'flip', 'floor',
'focal_point', 'fog', 'fog_alt', 'fog_offset', 'fog_type', 'frequency', 'gif',
'global_settings', 'glowing', 'gradient', 'granite', 'gray_threshold',
'green', 'halo', 'hexagon', 'hf_gray_16', 'hierarchy', 'hollow', 'hypercomplex',
'if', 'ifdef', 'iff', 'image_map', 'incidence', 'include', 'int', 'interpolate',
'inverse', 'ior', 'irid', 'irid_wavelength', 'jitter', 'lambda', 'leopard',
'linear', 'linear_spline', 'linear_sweep', 'location', 'log', 'looks_like',
'look_at', 'low_error_factor', 'mandel', 'map_type', 'marble', 'material_map',
'matrix', 'max', 'max_intersections', 'max_iteration', 'max_trace_level',
'max_value', 'metallic', 'min', 'minimum_reuse', 'mod', 'mortar',
'nearest_count', 'no', 'normal', 'normal_map', 'no_shadow', 'number_of_waves',
'octaves', 'off', 'offset', 'omega', 'omnimax', 'on', 'once', 'onion', 'open',
'orthographic', 'panoramic', 'pattern1', 'pattern2', 'pattern3',
'perspective', 'pgm', 'phase', 'phong', 'phong_size', 'pi', 'pigment',
'pigment_map', 'planar_mapping', 'png', 'point_at', 'pot', 'pow', 'ppm',
'precision', 'pwr', 'quadratic_spline', 'quaternion', 'quick_color',
'quick_colour', 'quilted', 'radial', 'radians', 'radiosity', 'radius', 'rainbow',
'ramp_wave', 'rand', 'range', 'reciprocal', 'recursion_limit', 'red',
'reflection', 'refraction', 'render', 'repeat', 'rgb', 'rgbf', 'rgbft', 'rgbt',
'right', 'ripples', 'rotate', 'roughness', 'samples', 'scale', 'scallop_wave',
'scattering', 'seed', 'shadowless', 'sin', 'sine_wave', 'sinh', 'sky', 'sky_sphere',
'slice', 'slope_map', 'smooth', 'specular', 'spherical_mapping', 'spiral',
'spiral1', 'spiral2', 'spotlight', 'spotted', 'sqr', 'sqrt', 'statistics', 'str',
'strcmp', 'strength', 'strlen', 'strlwr', 'strupr', 'sturm', 'substr', 'switch', 'sys',
't', 'tan', 'tanh', 'test_camera_1', 'test_camera_2', 'test_camera_3',
'test_camera_4', 'texture', 'texture_map', 'tga', 'thickness', 'threshold',
'tightness', 'tile2', 'tiles', 'track', 'transform', 'translate', 'transmit',
'triangle_wave', 'true', 'ttf', 'turbulence', 'turb_depth', 'type',
'ultra_wide_angle', 'up', 'use_color', 'use_colour', 'use_index', 'u_steps',
'val', 'variance', 'vaxis_rotate', 'vcross', 'vdot', 'version', 'vlength',
'vnormalize', 'volume_object', 'volume_rendered', 'vol_with_light',
'vrotate', 'v_steps', 'warning', 'warp', 'water_level', 'waves', 'while', 'width',
'wood', 'wrinkles', 'yes'), prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
'bicubic_patch', 'blob', 'box', 'camera', 'cone', 'cubic', 'cylinder', 'difference',
'disc', 'height_field', 'intersection', 'julia_fractal', 'lathe',
'light_source', 'merge', 'mesh', 'object', 'plane', 'poly', 'polygon', 'prism',
'quadric', 'quartic', 'smooth_triangle', 'sor', 'sphere', 'superellipsoid',
'text', 'torus', 'triangle', 'union'), suffix=r'\b'),
Name.Builtin),
# TODO: <=, etc
(r'[\[\](){}<>;,]', Punctuation),
(r'[-+*/=]', Operator),
(r'\b(x|y|z|u|v)\b', Name.Builtin.Pseudo),
(r'[a-zA-Z_]\w*', Name),
(r'[0-9]+\.[0-9]*', Number.Float),
(r'\.[0-9]+', Number.Float),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String),
(r'\s+', Text),
]
}
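# Usage sketch (assumption, not part of this module): these lexers plug into the
# standard pygments API, e.g.
#
#   from pygments import highlight
#   from pygments.formatters import HtmlFormatter
#   html = highlight("plot sin(x) title 'demo'", GnuplotLexer(), HtmlFormatter())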
|
py | b40e0292b606346dbac585bd14d4a3278142a9f1 | import _plotly_utils.basevalidators
class ContourcarpetValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
def __init__(
self, plotly_name="contourcarpet", parent_name="layout.template.data", **kwargs
):
super(ContourcarpetValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Contourcarpet"),
data_docs=kwargs.pop(
"data_docs",
"""
""",
),
**kwargs,
)
|
py | b40e0333826475703c8e9d103f19d265a19798a3 | import os
import bpy
import bpy_extras
from ... import ops, plugin_prefs, registry, utils
from .. import imp
from . import utils as imp_utils
@registry.module_thing
class OpImportObject(ops.BaseOperator, bpy_extras.io_utils.ImportHelper):
bl_idname = 'xray_import.object'
bl_label = 'Import .object'
bl_description = 'Imports X-Ray object'
bl_options = {'UNDO'}
filter_glob = bpy.props.StringProperty(
default='*.object', options={'HIDDEN'}
)
directory = bpy.props.StringProperty(subtype="DIR_PATH")
files = bpy.props.CollectionProperty(
type=bpy.types.OperatorFileListElement
)
import_motions = plugin_prefs.PropObjectMotionsImport()
mesh_split_by_materials = plugin_prefs.PropObjectMeshSplitByMaterials()
shaped_bones = plugin_prefs.PropObjectBonesCustomShapes()
fmt_version = plugin_prefs.PropSDKVersion()
@utils.execute_with_logger
@utils.set_cursor_state
def execute(self, _context):
textures_folder = plugin_prefs.get_preferences().textures_folder_auto
objects_folder = plugin_prefs.get_preferences().objects_folder
if not textures_folder:
self.report({'WARNING'}, 'No textures folder specified')
if not self.files:
self.report({'ERROR'}, 'No files selected')
return {'CANCELLED'}
import_context = imp_utils.ImportContext(
textures=textures_folder,
soc_sgroups=self.fmt_version == 'soc',
import_motions=self.import_motions,
split_by_materials=self.mesh_split_by_materials,
operator=self,
objects=objects_folder
)
for file in self.files:
ext = os.path.splitext(file.name)[-1].lower()
if ext == '.object':
import_context.before_import_file()
imp.import_file(
os.path.join(self.directory, file.name), import_context
)
else:
self.report(
{'ERROR'}, 'Format of {} not recognised'.format(file)
)
return {'FINISHED'}
def draw(self, _context):
layout = self.layout
row = layout.row()
row.enabled = False
row.label('%d items' % len(self.files))
row = layout.split()
row.label('Format Version:')
row.row().prop(self, 'fmt_version', expand=True)
layout.prop(self, 'import_motions')
layout.prop(self, 'mesh_split_by_materials')
layout.prop(self, 'shaped_bones')
def invoke(self, context, event):
prefs = plugin_prefs.get_preferences()
self.fmt_version = prefs.sdk_version
self.import_motions = prefs.object_motions_import
self.mesh_split_by_materials = prefs.object_mesh_split_by_mat
self.shaped_bones = prefs.object_bones_custom_shapes
return super().invoke(context, event)
|
py | b40e03d6e19a46dd821b14e8373e8aa6a1cad9cb | import torch
from . import initialization as init
class SegmentationModel(torch.nn.Module):
def initialize(self):
init.initialize_decoder(self.decoder)
def forward(self, x):
"""Sequentially pass `x` trough model`s encoder, decoder and heads"""
features = self.encoder(x)
decoder_output = self.decoder(*features)
return decoder_output
def predict(self, x):
"""Inference method. Switch model to `eval` mode, call `.forward(x)` with `torch.no_grad()`
Args:
x: 4D torch tensor with shape (batch_size, channels, height, width)
Return:
prediction: 4D torch tensor with shape (batch_size, classes, height, width)
"""
if self.training:
self.eval()
with torch.no_grad():
x = self.forward(x)
return x
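# Minimal subclass sketch (assumption, not part of the original module): a concrete
# model only needs `encoder` and `decoder` attributes; the toy modules below are
# hypothetical and assume `init.initialize_decoder` accepts any nn.Module.
class _ToyEncoder(torch.nn.Module):
    def forward(self, x):
        return [x]  # single feature map, wrapped so `decoder(*features)` unpacks cleanly
class _ToySegmentationModel(SegmentationModel):
    def __init__(self):
        super().__init__()
        self.encoder = _ToyEncoder()
        self.decoder = torch.nn.Conv2d(3, 1, kernel_size=1)
        self.initialize()
# e.g. _ToySegmentationModel().predict(torch.randn(1, 3, 32, 32)) returns (1, 1, 32, 32) logits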
|
py | b40e049284dcde5c3dad10685b67884048ea71a5 | import boto3
import json
from boto3.dynamodb.conditions import Key, Attr
import datetime
from botocore.exceptions import ClientError
import decimal
# Helper class to convert a DynamoDB item to JSON.
class DecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
if o % 1 > 0:
return float(o)
else:
return int(o)
return super(DecimalEncoder, self).default(o)
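# Usage sketch (assumption): DynamoDB returns numbers as decimal.Decimal, so passing
# this encoder to json.dumps lets query results serialize cleanly, e.g.
# json.dumps({"cases": decimal.Decimal("10")}, cls=DecimalEncoder) -> '{"cases": 10}'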
def country_query_history(event, context):
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('corona-db')
query_param = event['queryStringParameters']
pays = query_param['country']
response = table.query(
KeyConditionExpression=Key('country').eq(pays)
)
country_totalcases = []
for i in response['Items']:
country_totalcases.append(i)
body_response = json.dumps(response, indent=4, cls=DecimalEncoder)
return {
"statusCode": 200,
"body": body_response,
"headers": {
"Access-Control-Allow-Origin": "*"
}
}
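# Local invocation sketch (assumption): API Gateway supplies the country as a query
# string parameter, so a minimal test event looks like
#   country_query_history({"queryStringParameters": {"country": "France"}}, None)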
|
py | b40e04e522b3c6a5d21e44825934f3ab587d5b97 | """
Migration script to create a table for page-user share association.
"""
from __future__ import print_function
import logging
from sqlalchemy import Column, ForeignKey, Integer, MetaData, Table
log = logging.getLogger( __name__ )
metadata = MetaData()
PageUserShareAssociation_table = Table( "page_user_share_association", metadata,
Column( "id", Integer, primary_key=True ),
Column( "page_id", Integer, ForeignKey( "page.id" ), index=True ),
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True ) )
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print(__doc__)
metadata.reflect()
# Create stored_workflow_tag_association table.
try:
PageUserShareAssociation_table.create()
except Exception as e:
print(str(e))
log.debug( "Creating page_user_share_association table failed: %s" % str( e ) )
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
# Drop workflow_tag_association table.
try:
PageUserShareAssociation_table.drop()
except Exception as e:
print(str(e))
log.debug( "Dropping page_user_share_association table failed: %s" % str( e ) )
|
py | b40e06086b09d41de28c07afa4e7e6e0c124fc08 | # This file exists only so the empty /oauth2/ folder can be committed. |
py | b40e06179515e094683488fc1131f71e17f000ba | ## Basic configuration here:
access = {
"hostdir":"/home/mstucka/fud",
"dbhost":"localhost",
"dbuser":"mstucka",
"dbpassword":"yeahno",
"dbdatabase":"editorial"
}
|
py | b40e07ce259fc5b410b98a6672ff37d8bcb8c684 | #!/usr/bin/python
#
# Copyright (c) 2020 Blickfeld GmbH.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE.md file in the root directory of this source tree.
#
from __future__ import print_function
__all__ = [
"point_cloud",
"raw",
"status",
"imu",
]
from .point_cloud import point_cloud
from .raw import raw
from .status import status
from .imu import imu
|
py | b40e09562a81514241d819fa516e63a9b1d9f921 | # Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
"""This module includes test cases for testing target wallet application sending payment to another wallet.
The onchain accounts of target wallet application should be set up with at least 1_000_000_000_000 coins before running tests.
We can't mint coins for the sending payment onchain account, because depending on implementation details, sending
payment accounts may be different from the onchain accounts used for receiving payments.
"""
from diem import identifier
from diem.testing import LocalAccount
from diem.testing.miniwallet import RestClient
from ..conftest import wait_for_balance
import pytest, aiohttp
pytestmark = pytest.mark.asyncio # pyre-ignore
@pytest.mark.parametrize("invalid_currency", ["XU", "USD", "xus", "", '"XUS"', "X"])
async def test_send_payment_with_invalid_currency(
stub_client: RestClient, target_client: RestClient, invalid_currency: str, currency: str
) -> None:
"""
Test Plan:
1. Generate valid receive payment account identifier.
2. Call send payment `POST /accounts/{account_id}/payments` with invalid currency code.
3. Expect server response 400 client error, and sender account balance is not changed.
"""
amount = 1_000_000
sender_account = await target_client.create_account(balances={currency: amount})
receiver_account = await stub_client.create_account()
try:
initial_balances = await sender_account.balances()
receiver_account_identifier = await receiver_account.generate_account_identifier()
with pytest.raises(aiohttp.ClientResponseError, match="400"):
await sender_account.send_payment(
currency=invalid_currency, amount=amount, payee=receiver_account_identifier
)
assert await sender_account.balances() == initial_balances
finally:
await receiver_account.log_events()
await sender_account.log_events()
@pytest.mark.parametrize("invalid_amount", [-1, 0.1])
async def test_send_payment_with_invalid_amount(
stub_client: RestClient, target_client: RestClient, invalid_amount: float, currency: str
) -> None:
"""
Test Plan:
1. Generate valid receive payment account identifier.
2. Call send payment `POST /accounts/{account_id}/payments` with invalid amount numbers.
3. Expect server response 400 client error, and sender account balance is not changed.
"""
amount = 1_000_000
sender_account = await target_client.create_account(balances={currency: amount})
receiver_account = await stub_client.create_account()
try:
initial_balances = await sender_account.balances()
receiver_account_identifier = await receiver_account.generate_account_identifier()
with pytest.raises(aiohttp.ClientResponseError, match="400"):
await sender_account.send_payment(
currency=currency, amount=invalid_amount, payee=receiver_account_identifier # pyre-ignore
)
assert await sender_account.balances() == initial_balances
finally:
await receiver_account.log_events()
await sender_account.log_events()
async def test_send_payment_with_invalid_account_identifier_as_payee(target_client: RestClient, currency: str) -> None:
"""
Test Plan:
1. Call send payment `POST /accounts/{account_id}/payments` with `invalid account identifier` as payee.
2. Expect server response 400 client error, and sender account balance is not changed.
"""
amount = 1_000_000
sender_account = await target_client.create_account(balances={currency: amount})
try:
with pytest.raises(aiohttp.ClientResponseError, match="400"):
await sender_account.send_payment(currency=currency, amount=amount, payee="invalid account identifier")
assert await sender_account.balance(currency) == amount
finally:
await sender_account.log_events()
async def test_send_payment_with_invalid_account_identifier_checksum_as_payee(
stub_client: RestClient, target_client: RestClient, currency: str
) -> None:
"""
Test Plan:
1. Generate valid receive payment account identifier.
    2. Manipulate the valid account identifier's checksum characters.
3. Call send payment `POST /accounts/{account_id}/payments` with invalid account identifier.
4. Expect server response 400 client error, and sender account balance is not changed.
"""
amount = 1_000_000
sender_account = await target_client.create_account(balances={currency: amount})
receiver_account = await stub_client.create_account()
try:
receiver_account_identifier = await receiver_account.generate_account_identifier()
invalid_account_identifier = receiver_account_identifier[:-6] + "000000"
with pytest.raises(aiohttp.ClientResponseError, match="400"):
await sender_account.send_payment(currency=currency, amount=amount, payee=invalid_account_identifier)
assert await sender_account.balance(currency) == amount
finally:
await receiver_account.log_events()
await sender_account.log_events()
async def test_send_payment_with_invalid_account_identifier_hrp_as_payee(
stub_client: RestClient, target_client: RestClient, currency: str, hrp: str
) -> None:
"""
Test Plan:
1. Generate valid receive payment account identifier.
2. Extract account onchain address and subaddress from receiving payment account identifier.
3. Use a different hrp and extracted account address and subaddress to create a new account identifier.
4. Call send payment `POST /accounts/{account_id}/payments` with created account identifier.
5. Expect server response 400 client error, and sender account balance is not changed.
"""
amount = 1_000_000
sender_account = await target_client.create_account(balances={currency: amount})
receiver_account = await stub_client.create_account()
try:
receiver_account_identifier = await receiver_account.generate_account_identifier()
account_address, subaddress = identifier.decode_account(receiver_account_identifier, hrp)
new_hrp = identifier.TDM if hrp != identifier.TDM else identifier.PDM
new_account_identifier = identifier.encode_account(account_address, subaddress, new_hrp)
with pytest.raises(aiohttp.ClientResponseError, match="400"):
await sender_account.send_payment(currency=currency, amount=amount, payee=new_account_identifier)
assert await sender_account.balance(currency) == amount
finally:
await receiver_account.log_events()
await sender_account.log_events()
async def test_send_payment_with_invalid_account_identifier_onchain_account_address_as_payee(
stub_client: RestClient, target_client: RestClient, currency: str, hrp: str
) -> None:
"""
Test Plan:
1. Generate valid receive payment account identifier.
2. Extract account onchain address and subaddress from receiving payment account identifier.
3. Use an invalid onchain account address and extracted subaddress to create a new account identifier.
4. Call send payment `POST /accounts/{account_id}/payments` with created account identifier.
5. Expect server response 400 client error, and sender account balance is not changed.
"""
amount = 1_000_000
sender_account = await target_client.create_account(balances={currency: amount})
receiver_account = await stub_client.create_account()
try:
receiver_account_identifier = await receiver_account.generate_account_identifier()
_, subaddress = identifier.decode_account(receiver_account_identifier, hrp)
invalid_account_address = LocalAccount().account_address
invalid_account_identifier = identifier.encode_account(invalid_account_address, subaddress, hrp)
with pytest.raises(aiohttp.ClientResponseError, match="400"):
await sender_account.send_payment(currency=currency, amount=amount, payee=invalid_account_identifier)
assert await sender_account.balance(currency) == amount
finally:
await receiver_account.log_events()
await sender_account.log_events()
async def test_send_payment_with_an_amount_exceeding_account_balance(
stub_client: RestClient, target_client: RestClient, currency: str
) -> None:
"""
Test Plan:
1. Generate valid receive payment account identifier.
2. Get sender account balance.
3. Call send payment `POST /accounts/{account_id}/payments` with amount = sender account balance + 1.
4. Expect server response 400 client error, and sender account balance is not changed.
"""
amount = 1_000_000
sender_account = await target_client.create_account(balances={currency: amount})
receiver_account = await stub_client.create_account()
try:
receiver_account_identifier = await receiver_account.generate_account_identifier()
with pytest.raises(aiohttp.ClientResponseError, match="400"):
await sender_account.send_payment(currency=currency, amount=amount + 1, payee=receiver_account_identifier)
assert await sender_account.balance(currency) == amount
finally:
await receiver_account.log_events()
await sender_account.log_events()
@pytest.mark.parametrize("amount", [10_000, 56_780_000, 120_000])
async def test_send_payment_with_valid_inputs_under_the_travel_rule_threshold(
stub_client: RestClient,
target_client: RestClient,
amount: int,
currency: str,
) -> None:
"""
Test Plan:
1. Generate valid receive payment account identifier.
2. Call send payment `POST /accounts/{account_id}/payments` with the account identifier.
3. Expect send payment success; receiver account balance increased by the amount sent; sender account balance decreased by the amount sent.
"""
sender_account = await target_client.create_account(balances={currency: amount})
receiver_account = await stub_client.create_account()
try:
receiver_account_identifier = await receiver_account.generate_account_identifier()
await sender_account.send_payment(currency=currency, amount=amount, payee=receiver_account_identifier)
await wait_for_balance(receiver_account, currency, amount)
await wait_for_balance(sender_account, currency, 0)
finally:
await receiver_account.log_events()
await sender_account.log_events()
@pytest.mark.parametrize(
"amount",
[
10_000,
999_990_000,
1_000_000_000,
2_000_000_000,
],
)
async def test_send_payment_to_the_other_account_in_the_same_wallet(
target_client: RestClient,
currency: str,
amount: int,
) -> None:
"""
Test Plan:
1. Create 2 accounts in target wallet application, one for sender, one for receiver.
2. Generate valid receive payment account identifier from the receiver account.
3. Send payment from sender account to receiver account.
4. Expect send payment success; receiver account balance increased by the amount sent; sender account balance decreased by the amount sent.
"""
sender_account = await target_client.create_account(balances={currency: amount})
receiver_account = await target_client.create_account()
try:
receiver_account_identifier = await receiver_account.generate_account_identifier()
await sender_account.send_payment(currency, amount, payee=receiver_account_identifier)
await wait_for_balance(sender_account, currency, 0)
await wait_for_balance(receiver_account, currency, amount)
finally:
await receiver_account.log_events()
await sender_account.log_events()
|
py | b40e0a1a8d4acec65d8b3be7d1348dbfe2fc8b21 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class StopInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'StopInstance')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_StoppedMode(self):
return self.get_query_params().get('StoppedMode')
def set_StoppedMode(self,StoppedMode):
self.add_query_param('StoppedMode',StoppedMode)
def get_ForceStop(self):
return self.get_query_params().get('ForceStop')
def set_ForceStop(self,ForceStop):
self.add_query_param('ForceStop',ForceStop)
def get_ConfirmStop(self):
return self.get_query_params().get('ConfirmStop')
def set_ConfirmStop(self,ConfirmStop):
self.add_query_param('ConfirmStop',ConfirmStop)
def get_DryRun(self):
return self.get_query_params().get('DryRun')
def set_DryRun(self,DryRun):
self.add_query_param('DryRun',DryRun)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId) |
py | b40e0a220b3196caf13b1d0d29c4610693117d80 | """
Low level signal processing utilities
Authors
* Anonymous
"""
import torch
import math
from packaging import version
def compute_amplitude(waveforms, lengths=None, amp_type="avg", scale="linear"):
"""Compute amplitude of a batch of waveforms.
Arguments
---------
    waveforms : tensor
The waveforms used for computing amplitude.
Shape should be `[time]` or `[batch, time]` or
`[batch, time, channels]`.
lengths : tensor
The lengths of the waveforms excluding the padding.
Shape should be a single dimension, `[batch]`.
amp_type : str
Whether to compute "avg" average or "peak" amplitude.
Choose between ["avg", "peak"].
scale : str
Whether to compute amplitude in "dB" or "linear" scale.
Choose between ["linear", "dB"].
Returns
-------
The average amplitude of the waveforms.
Example
-------
>>> signal = torch.sin(torch.arange(16000.0)).unsqueeze(0)
>>> compute_amplitude(signal, signal.size(1))
tensor([[0.6366]])
"""
if len(waveforms.shape) == 1:
waveforms = waveforms.unsqueeze(0)
assert amp_type in ["avg", "peak"]
assert scale in ["linear", "dB"]
if amp_type == "avg":
if lengths is None:
out = torch.mean(torch.abs(waveforms), dim=1, keepdim=True)
else:
wav_sum = torch.sum(input=torch.abs(waveforms), dim=1, keepdim=True)
out = wav_sum / lengths
elif amp_type == "peak":
out = torch.max(torch.abs(waveforms), dim=1, keepdim=True)[0]
else:
raise NotImplementedError
if scale == "linear":
return out
elif scale == "dB":
return torch.clamp(20 * torch.log10(out), min=-80) # clamp zeros
else:
raise NotImplementedError
def normalize(waveforms, lengths=None, amp_type="avg", eps=1e-14):
"""This function normalizes a signal to unitary average or peak amplitude.
Arguments
---------
waveforms : tensor
The waveforms to normalize.
Shape should be `[batch, time]` or `[batch, time, channels]`.
lengths : tensor
The lengths of the waveforms excluding the padding.
Shape should be a single dimension, `[batch]`.
amp_type : str
Whether one wants to normalize with respect to "avg" or "peak"
amplitude. Choose between ["avg", "peak"]. Note: for "avg" clipping
is not prevented and can occur.
eps : float
A small number to add to the denominator to prevent NaN.
Returns
-------
waveforms : tensor
Normalized level waveform.
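    Example
    -------
    A minimal, hypothetical usage sketch (the tensor values are illustrative
    only and are not part of the original documentation):
    >>> wavs = torch.rand(2, 8000) * 0.3
    >>> peak_normalized = normalize(wavs, amp_type="peak")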
"""
assert amp_type in ["avg", "peak"]
batch_added = False
if len(waveforms.shape) == 1:
batch_added = True
waveforms = waveforms.unsqueeze(0)
den = compute_amplitude(waveforms, lengths, amp_type) + eps
if batch_added:
waveforms = waveforms.squeeze(0)
return waveforms / den
def rescale(waveforms, lengths, target_lvl, amp_type="avg", scale="linear"):
"""This functions performs signal rescaling to a target level.
Arguments
---------
waveforms : tensor
The waveforms to normalize.
Shape should be `[batch, time]` or `[batch, time, channels]`.
lengths : tensor
The lengths of the waveforms excluding the padding.
Shape should be a single dimension, `[batch]`.
target_lvl : float
Target lvl in dB or linear scale.
amp_type : str
Whether one wants to rescale with respect to "avg" or "peak" amplitude.
Choose between ["avg", "peak"].
scale : str
        Whether target_lvl belongs to linear or dB scale.
Choose between ["linear", "dB"].
Returns
-------
waveforms : tensor
Rescaled waveforms.
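    Example
    -------
    A hypothetical sketch (values are illustrative only): rescale a batch so
    that its average amplitude becomes 0.5 on a linear scale.
    >>> wavs = torch.rand(2, 8000)
    >>> rescaled = rescale(wavs, wavs.size(1), target_lvl=0.5)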
"""
assert amp_type in ["peak", "avg"]
assert scale in ["linear", "dB"]
batch_added = False
if len(waveforms.shape) == 1:
batch_added = True
waveforms = waveforms.unsqueeze(0)
waveforms = normalize(waveforms, lengths, amp_type)
if scale == "linear":
out = target_lvl * waveforms
elif scale == "dB":
out = dB_to_amplitude(target_lvl) * waveforms
else:
raise NotImplementedError("Invalid scale, choose between dB and linear")
if batch_added:
out = out.squeeze(0)
return out
def convolve1d(
waveform,
kernel,
padding=0,
pad_type="constant",
stride=1,
groups=1,
use_fft=False,
rotation_index=0,
):
"""Use torch.nn.functional to perform 1d padding and conv.
Arguments
---------
waveform : tensor
The tensor to perform operations on.
kernel : tensor
The filter to apply during convolution.
padding : int or tuple
The padding (pad_left, pad_right) to apply.
If an integer is passed instead, this is passed
to the conv1d function and pad_type is ignored.
pad_type : str
The type of padding to use. Passed directly to
`torch.nn.functional.pad`, see PyTorch documentation
for available options.
stride : int
The number of units to move each time convolution is applied.
Passed to conv1d. Has no effect if `use_fft` is True.
groups : int
This option is passed to `conv1d` to split the input into groups for
convolution. Input channels should be divisible by the number of groups.
use_fft : bool
When `use_fft` is passed `True`, then compute the convolution in the
spectral domain using complex multiply. This is more efficient on CPU
when the size of the kernel is large (e.g. reverberation). WARNING:
Without padding, circular convolution occurs. This makes little
difference in the case of reverberation, but may make more difference
with different kernels.
rotation_index : int
This option only applies if `use_fft` is true. If so, the kernel is
rolled by this amount before convolution to shift the output location.
Returns
-------
The convolved waveform.
Example
-------
>>> from speechbrain.dataio.dataio import read_audio
>>> signal = read_audio('samples/audio_samples/example1.wav')
>>> signal = signal.unsqueeze(0).unsqueeze(2)
>>> kernel = torch.rand(1, 10, 1)
>>> signal = convolve1d(signal, kernel, padding=(9, 0))
"""
if len(waveform.shape) != 3:
raise ValueError("Convolve1D expects a 3-dimensional tensor")
# Move time dimension last, which pad and fft and conv expect.
waveform = waveform.transpose(2, 1)
kernel = kernel.transpose(2, 1)
# Padding can be a tuple (left_pad, right_pad) or an int
if isinstance(padding, tuple):
waveform = torch.nn.functional.pad(
input=waveform, pad=padding, mode=pad_type,
)
# This approach uses FFT, which is more efficient if the kernel is large
if use_fft:
# Pad kernel to same length as signal, ensuring correct alignment
zero_length = waveform.size(-1) - kernel.size(-1)
# Handle case where signal is shorter
if zero_length < 0:
kernel = kernel[..., :zero_length]
zero_length = 0
# Perform rotation to ensure alignment
zeros = torch.zeros(
kernel.size(0), kernel.size(1), zero_length, device=kernel.device
)
after_index = kernel[..., rotation_index:]
before_index = kernel[..., :rotation_index]
kernel = torch.cat((after_index, zeros, before_index), dim=-1)
# Multiply in frequency domain to convolve in time domain
if version.parse(torch.__version__) > version.parse("1.6.0"):
import torch.fft as fft
result = fft.rfft(waveform) * fft.rfft(kernel)
convolved = fft.irfft(result, n=waveform.size(-1))
else:
f_signal = torch.rfft(waveform, 1)
f_kernel = torch.rfft(kernel, 1)
sig_real, sig_imag = f_signal.unbind(-1)
ker_real, ker_imag = f_kernel.unbind(-1)
f_result = torch.stack(
[
sig_real * ker_real - sig_imag * ker_imag,
sig_real * ker_imag + sig_imag * ker_real,
],
dim=-1,
)
convolved = torch.irfft(
f_result, 1, signal_sizes=[waveform.size(-1)]
)
# Use the implementation given by torch, which should be efficient on GPU
else:
convolved = torch.nn.functional.conv1d(
input=waveform,
weight=kernel,
stride=stride,
groups=groups,
padding=padding if not isinstance(padding, tuple) else 0,
)
# Return time dimension to the second dimension.
return convolved.transpose(2, 1)
def reverberate(waveforms, rir_waveform, rescale_amp="avg"):
"""
General function to contaminate a given signal with reverberation given a
Room Impulse Response (RIR).
It performs convolution between RIR and signal, but without changing
the original amplitude of the signal.
Arguments
---------
waveforms : tensor
        The waveforms to reverberate.
Shape should be `[batch, time]` or `[batch, time, channels]`.
rir_waveform : tensor
RIR tensor, shape should be [time, channels].
rescale_amp : str
        Whether the reverberated signal is rescaled (None disables rescaling),
        using either the original signal's "peak" or "avg" amplitude.
Choose between [None, "avg", "peak"].
Returns
-------
waveforms: tensor
Reverberated signal.
"""
orig_shape = waveforms.shape
if len(waveforms.shape) > 3 or len(rir_waveform.shape) > 3:
raise NotImplementedError
# if inputs are mono tensors we reshape to 1, samples
if len(waveforms.shape) == 1:
waveforms = waveforms.unsqueeze(0).unsqueeze(-1)
elif len(waveforms.shape) == 2:
waveforms = waveforms.unsqueeze(-1)
if len(rir_waveform.shape) == 1: # convolve1d expects a 3d tensor !
rir_waveform = rir_waveform.unsqueeze(0).unsqueeze(-1)
elif len(rir_waveform.shape) == 2:
rir_waveform = rir_waveform.unsqueeze(-1)
# Compute the average amplitude of the clean
orig_amplitude = compute_amplitude(
waveforms, waveforms.size(1), rescale_amp
)
# Compute index of the direct signal, so we can preserve alignment
value_max, direct_index = rir_waveform.abs().max(axis=1, keepdim=True)
# Making sure the max is always positive (if not, flip)
# mask = torch.logical_and(rir_waveform == value_max, rir_waveform < 0)
# rir_waveform[mask] = -rir_waveform[mask]
# Use FFT to compute convolution, because of long reverberation filter
waveforms = convolve1d(
waveform=waveforms,
kernel=rir_waveform,
use_fft=True,
rotation_index=direct_index,
)
# Rescale to the peak amplitude of the clean waveform
waveforms = rescale(
waveforms, waveforms.size(1), orig_amplitude, rescale_amp
)
if len(orig_shape) == 1:
waveforms = waveforms.squeeze(0).squeeze(-1)
if len(orig_shape) == 2:
waveforms = waveforms.squeeze(-1)
return waveforms
def dB_to_amplitude(SNR):
"""Returns the amplitude ratio, converted from decibels.
Arguments
---------
SNR : float
The ratio in decibels to convert.
Example
-------
>>> round(dB_to_amplitude(SNR=10), 3)
3.162
>>> dB_to_amplitude(SNR=0)
1.0
"""
return 10 ** (SNR / 20)
def notch_filter(notch_freq, filter_width=101, notch_width=0.05):
"""Returns a notch filter constructed from a high-pass and low-pass filter.
(from https://tomroelandts.com/articles/
how-to-create-simple-band-pass-and-band-reject-filters)
Arguments
---------
notch_freq : float
frequency to put notch as a fraction of the
sampling rate / 2. The range of possible inputs is 0 to 1.
filter_width : int
Filter width in samples. Longer filters have
smaller transition bands, but are more inefficient.
notch_width : float
Width of the notch, as a fraction of the sampling_rate / 2.
Example
-------
>>> from speechbrain.dataio.dataio import read_audio
>>> signal = read_audio('samples/audio_samples/example1.wav')
>>> signal = signal.unsqueeze(0).unsqueeze(2)
>>> kernel = notch_filter(0.25)
>>> notched_signal = convolve1d(signal, kernel)
"""
# Check inputs
assert 0 < notch_freq <= 1
assert filter_width % 2 != 0
pad = filter_width // 2
inputs = torch.arange(filter_width) - pad
# Avoid frequencies that are too low
notch_freq += notch_width
# Define sinc function, avoiding division by zero
def sinc(x):
def _sinc(x):
return torch.sin(x) / x
# The zero is at the middle index
return torch.cat([_sinc(x[:pad]), torch.ones(1), _sinc(x[pad + 1 :])])
# Compute a low-pass filter with cutoff frequency notch_freq.
hlpf = sinc(3 * (notch_freq - notch_width) * inputs)
hlpf *= torch.blackman_window(filter_width)
hlpf /= torch.sum(hlpf)
# Compute a high-pass filter with cutoff frequency notch_freq.
hhpf = sinc(3 * (notch_freq + notch_width) * inputs)
hhpf *= torch.blackman_window(filter_width)
hhpf /= -torch.sum(hhpf)
hhpf[pad] += 1
# Adding filters creates notch filter
return (hlpf + hhpf).view(1, -1, 1)
def overlap_and_add(signal, frame_step):
"""Taken from https://github.com/kaituoxu/Conv-TasNet/blob/master/src/utils.py
Reconstructs a signal from a framed representation.
Adds potentially overlapping frames of a signal with shape
`[..., frames, frame_length]`, offsetting subsequent frames by `frame_step`.
The resulting tensor has shape `[..., output_size]` where
output_size = (frames - 1) * frame_step + frame_length
Args:
signal: A [..., frames, frame_length] Tensor. All dimensions may be unknown, and rank must be at least 2.
frame_step: An integer denoting overlap offsets. Must be less than or equal to frame_length.
Returns:
A Tensor with shape [..., output_size] containing the overlap-added frames of signal's inner-most two dimensions.
output_size = (frames - 1) * frame_step + frame_length
Based on https://github.com/tensorflow/tensorflow/blob/r1.12/tensorflow/contrib/signal/python/ops/reconstruction_ops.py
Example
-------
>>> signal = torch.randn(5, 20)
>>> overlapped = overlap_and_add(signal, 20)
>>> overlapped.shape
torch.Size([100])
"""
outer_dimensions = signal.size()[:-2]
frames, frame_length = signal.size()[-2:]
subframe_length = math.gcd(
frame_length, frame_step
) # gcd=Greatest Common Divisor
subframe_step = frame_step // subframe_length
subframes_per_frame = frame_length // subframe_length
output_size = frame_step * (frames - 1) + frame_length
output_subframes = output_size // subframe_length
subframe_signal = signal.view(*outer_dimensions, -1, subframe_length)
frame = torch.arange(0, output_subframes).unfold(
0, subframes_per_frame, subframe_step
)
# frame_old = signal.new_tensor(frame).long() # signal may in GPU or CPU
frame = frame.clone().detach().to(signal.device.type)
# print((frame - frame_old).sum())
frame = frame.contiguous().view(-1)
result = signal.new_zeros(
*outer_dimensions, output_subframes, subframe_length
)
result.index_add_(-2, frame, subframe_signal)
result = result.view(*outer_dimensions, -1)
return result
def resynthesize(enhanced_mag, noisy_inputs, stft, istft, normalize_wavs=True):
"""Function for resynthesizing waveforms from enhanced mags.
Arguments
---------
enhanced_mag : torch.Tensor
Predicted spectral magnitude, should be three dimensional.
noisy_inputs : torch.Tensor
The noisy waveforms before any processing, to extract phase.
stft : torch.nn.Module
Module for computing the STFT for extracting phase.
istft : torch.nn.Module
Module for computing the iSTFT for resynthesis.
normalize_wavs : bool
Whether to normalize the output wavs before returning them.
Returns
-------
enhanced_wav : torch.Tensor
The resynthesized waveforms of the enhanced magnitudes with noisy phase.
"""
# Extract noisy phase from inputs
noisy_feats = stft(noisy_inputs)
noisy_phase = torch.atan2(noisy_feats[:, :, :, 1], noisy_feats[:, :, :, 0])
# Combine with enhanced magnitude
complex_predictions = torch.mul(
torch.unsqueeze(enhanced_mag, -1),
torch.cat(
(
torch.unsqueeze(torch.cos(noisy_phase), -1),
torch.unsqueeze(torch.sin(noisy_phase), -1),
),
-1,
),
)
pred_wavs = istft(complex_predictions, sig_length=noisy_inputs.shape[1])
# Normalize. Since we're using peak amplitudes, ignore lengths
if normalize_wavs:
pred_wavs = normalize(pred_wavs, amp_type="peak")
return pred_wavs
|
py | b40e0a645735773f45eae9e8bb1b1fb451844af8 |
from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name='StraightEdge',
version='0.1',
      description='Web scraper',
      long_description='Personally built web scraper',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3 :: Only',
'Topic :: Internet :: WWW/HTTP',
],
      keywords='webscraper web scraper text',
url='https://github.com/Apollo2707/StraightEdge',
author='Andrew Neumann',
author_email='[email protected]',
license='MIT',
packages=['StraightEdge'],
install_requires=[
],
include_package_data=True,
zip_safe=False)
|
py | b40e0aa6b412bbcad9e37e225bec828539a414d0 | import random
import time
print('Default initialization:\n')
r1 = random.SystemRandom()
r2 = random.SystemRandom()
for i in range(3):
print('{:04.3f} {:04.3f}'.format(r1.random(), r2.random()))
print('\nSame seed:\n')
seed = time.time()
r1 = random.SystemRandom(seed)
r2 = random.SystemRandom(seed)
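# Note: SystemRandom draws its values from os.urandom(), so the seed passed
# above is effectively ignored and the two generators still diverge.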
for i in range(3):
print('{:04.3f} {:04.3f}'.format(r1.random(), r2.random()))
|
py | b40e0ab57d3302c79f7980c4d2c89bdfea399af9 | # -*- coding: utf-8 -*-
"""Django AppSettings package."""
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import setting_changed
import six
from .settings import (
BooleanSetting, BooleanTypeChecker, DictSetting, DictTypeChecker,
FloatSetting, FloatTypeChecker, IntegerSetting, IntegerTypeChecker,
IterableSetting, IterableTypeChecker, ListSetting, ListTypeChecker,
ObjectSetting, ObjectTypeChecker, PositiveFloatSetting,
PositiveIntegerSetting, SetSetting, Setting, SetTypeChecker, StringSetting,
StringTypeChecker, TupleSetting, TupleTypeChecker, TypeChecker)
__all__ = (
'BooleanSetting',
'BooleanTypeChecker',
'DictSetting',
'DictTypeChecker',
'FloatSetting',
'FloatTypeChecker',
'IntegerSetting',
'IntegerTypeChecker',
'IterableSetting',
'IterableTypeChecker',
'ListSetting',
'ListTypeChecker',
'ObjectSetting',
'ObjectTypeChecker',
'PositiveFloatSetting',
'PositiveIntegerSetting',
'SetSetting',
'Setting',
'SetTypeChecker',
'StringSetting',
'StringTypeChecker',
'TupleSetting',
'TupleTypeChecker',
'TypeChecker'
)
class _Metaclass(type):
"""
``AppSettings``'s metaclass.
Each setting object declared in the class will be populated (name, prefix)
and moved into the _meta.settings dictionary. A reference to this
dictionary will also be added in the class as ``settings``.
"""
def __new__(mcs, cls, bases, dct):
"""
New method.
Args:
cls (str): class name.
bases (tuple): base classes to inherit from.
dct (dict): class attributes.
Returns:
class: the new created class.
"""
super_new = super(_Metaclass, mcs).__new__
# Also ensure initialization is only performed for subclasses
# of AppSettings (excluding AppSettings class itself).
parents = [b for b in bases if isinstance(b, _Metaclass)]
if not parents:
return super_new(mcs, cls, bases, dct)
new_attr = {}
_meta = dct.pop('Meta', type('Meta', (), {'setting_prefix': ''}))()
_meta.settings = {}
for name, setting in dct.items():
if isinstance(setting, Setting):
_meta.settings[name] = setting
# populate name
if setting.name == '':
setting.name = name
# populate prefix
if setting.prefix == '':
setting.prefix = _meta.setting_prefix
else:
new_attr[name] = setting
new_attr['_meta'] = _meta
new_attr['settings'] = _meta.settings
return super_new(mcs, cls, bases, new_attr)
def __getattr__(cls, item):
"""
Return a setting object if it is in the ``_meta.settings`` dictionary.
Args:
item (str):
the name of the setting variable (not the setting's name).
Returns:
``Setting``: the setting object.
Raises:
AttributeError if the setting does not exist.
"""
if item in cls._meta.settings.keys():
return cls._meta.settings[item]
raise AttributeError("'%s' class has no attribute '%s'" % (
cls.__name__, item))
class AppSettings(six.with_metaclass(_Metaclass)):
"""
Base class for application settings.
Only use this class as a parent class for inheritance. If you try to
access settings directly in ``AppSettings``, it will raise a
RecursionError. Some protections have been added to prevent you from
instantiating this very class, or to return immediately when running
``AppSettings.check()``, but trying to access attributes on the class is
not yet prevented.
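    A minimal, hypothetical subclass sketch (the setting name and prefix are
    illustrative, and constructor arguments are omitted because they are not
    shown in this module)::

        class MyAppSettings(AppSettings):
            verbose = BooleanSetting()

            class Meta:
                setting_prefix = 'MYAPP_'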
"""
def __init__(self):
"""
Initialization method.
The ``invalidate_cache`` method will be connected to the Django
``setting_changed`` signal in this method, with the dispatch UID
being the id of this very object (``id(self)``).
"""
if self.__class__ == AppSettings:
raise RuntimeError('Do not use AppSettings class as itself, '
'use it as a base for subclasses')
setting_changed.connect(self.invalidate_cache, dispatch_uid=id(self))
self._cache = {}
def __getattr__(self, item):
"""
Return a setting value.
The caching is done here. If the setting exists, and if it's variable
name is in the cache dictionary, return the cached value. If there
is no cached value, get the setting value with ``setting.get_value()``,
cache it, and return it.
Args:
item (str):
the name of the setting variable (not the setting's name).
Returns:
object: a setting value.
Raises:
AttributeError if the setting does not exist.
"""
if item in self.settings.keys():
if item in self._cache:
return self._cache[item]
value = self._cache[item] = self.settings[item].get_value()
return value
raise AttributeError("'%s' object has no attribute '%s'" % (
repr(self), item))
@classmethod
def check(cls):
"""
Class method to check every settings.
Will raise an ``ImproperlyConfigured`` exception with explanation.
"""
if cls == AppSettings:
return None
exceptions = []
for setting in cls.settings.values():
try:
setting.check()
# pylama:ignore=W0703
except Exception as e:
exceptions.append(str(e))
if exceptions:
raise ImproperlyConfigured('\n'.join(exceptions))
def invalidate_cache(self, **kwargs):
"""Invalidate cache. Run when receive ``setting_changed`` signal."""
self._cache = {}
|
py | b40e0b117e1c1562c1bf5650730c8b01f16b93b6 | #!/usr/bin/env python
from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
VERSION = "1.7.1"
setup(
name="buildbot_UnrealEngine",
version=VERSION,
description="Easy configuration for the Unreal Automation Tool",
long_description=long_description,
long_description_content_type="text/markdown",
author="Marvin Pohl",
author_email="[email protected]",
url="https://github.com/pampersrocker/buildbot-UnrealEngine",
packages=[
"buildbot_UnrealEngine",
"buildbot_UnrealEngine.BuildTool",
"buildbot_UnrealEngine.AutomationTool",
"buildbot_UnrealEngine.UnrealCommand"
],
requires=["Buildbot (>=2.2.0)"],
entry_points={
'buildbot.steps': [
'BuildCookRun = buildbot_UnrealEngine.AutomationTool:BuildCookRun',
'UEBuild = buildbot_UnrealEngine.BuildTool:Build',
'UERebuild = buildbot_UnrealEngine.BuildTool:Rebuild',
'UEClean = buildbot_UnrealEngine.BuildTool:Clean',
]
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Plugins",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS",
"Operating System :: POSIX :: Linux",
"Programming Language :: C++",
"Programming Language :: Python :: 2.7",
"Topic :: Software Development :: Build Tools",
]
)
|
py | b40e0b1bd34ea6e49c16747e9a0a2759f4a507b4 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : pengj
@ date : 2018/11/14 16:46
@ IDE : PyCharm
@ GitHub : https://github.com/JackyPJB
@ Contact : [email protected]
-------------------------------------------------
   Description : Intersection of Two Linked Lists
        Write a program to find the node at which two singly linked lists begin to intersect.
        For example, in the following two lists:
            A:       a1 → a2
                            ↘
                              c1 → c2 → c3
                            ↗
            B:  b1 → b2 → b3
        the intersection starts at node c1.
        Notes:
        If the two lists do not intersect, return null.
        The lists must keep their original structure after the function returns.
        You may assume there are no cycles anywhere in the linked structure.
        The program should ideally run in O(n) time and use only O(1) memory.
-------------------------------------------------
"""
import time
__author__ = 'Max_Pengjb'
start = time.time()
# 下面写上代码块
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def getIntersectionNode(self, headA, headB):
"""
:type head1, head1: ListNode
:rtype: ListNode
"""
        # My first attempt (kept below in the block comment) was quite convoluted.
        # The widely shared solution is much simpler:
        # by redirecting each pointer to the other list's head once it reaches the end,
        # both pointers traverse lenA + lenB nodes in total, so the length difference
        # is absorbed automatically without any counting or extra variables.
p1, p2 = headA, headB
while p1 != p2:
p1 = headB if not p1 else p1.next
p2 = headA if not p2 else p2.next
return p1
"""
if not headA or not headB:
return None
cnt_a = 0
cnt_b = 0
p = headA
q = headB
pp = headA
qq = headB
while p:
p = p.next
cnt_a += 1
while q:
q = q.next
cnt_b += 1
if p != q:
return None
if cnt_a > cnt_b:
for _ in range(cnt_a-cnt_b):
pp = pp.next
else:
for _ in range(cnt_b-cnt_a):
qq = qq.next
while pp != qq:
pp = pp.next
qq = qq.next
return pp
"""
head1 = ListNode(1)
p = head1
for i in range(2, 6):
p.next = ListNode(i)
p = p.next
head2 = ListNode(9)
head2.next = head1.next.next
res = Solution().getIntersectionNode(head1, head2)
print(not res or res.val)
# 上面中间写上代码块
end = time.time()
print('Running time: %s Seconds' % (end - start))
|
py | b40e0b8b429f9e7632400c3550b2b23b66747eaa | from django.conf.urls.defaults import patterns, include, url
urlpatterns = patterns('management.views',
url(r'^$', 'manage_front', name='manage_front'),
url(r'^organizations/$', 'manage_organizations', name='manage_organizations'),
url(r'^organization/create/$', 'create_organization', name='manage.create_organization'),
url(r'^organization/invitation/(?P<invitation_key>\w+)/$', 'claim_organization_invitation', name='claim_organization_invitation'),
url(r'^organization/invitation/(?P<invitation_id>\w+)/edit/$', 'edit_organization_invitation', name='edit_organization_invitation'),
url(r'^organizations/invited/$', 'view_organizations_invited', name='view_organizations_invited'),
url(r'^organization/(?P<organization_slug>\w+)/edit/$', 'edit_organization', name='edit_organization'),
)
|
py | b40e0bbc693d66957a27bcca4b9969c32956e06b | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import struct
from pymysql.constants import FIELD_TYPE
ENCODING_TYPE = "utf-8"
def convert_bytes_to_float(value) -> float:
""" The result is a tuple even if it contains exactly one item """
return struct.unpack('d', value)[0]
def convert_bytes_to_int(value) -> int:
""" Range of integer in pg is the same with int or long in c,
we unpack the value in int format """
return struct.unpack('i', value)[0]
def convert_bytes_to_long_long(value) -> int:
return struct.unpack('q', value)[0]
def convert_bytes_to_str(value) -> str:
return value.decode(ENCODING_TYPE)
def convert_bytes_to_decimal(value) -> str:
return convert_bytes_to_str(value)
def convert_bytes_to_date(value) -> str:
return convert_bytes_to_str(value)
def convert_bytes_to_time(value) -> str:
return convert_bytes_to_str(value)
def convert_bytes_to_datetime(value) -> str:
return convert_bytes_to_str(value)
def convert_bytes_to_timedelta(value) -> str:
return convert_bytes_to_str(value)
MYSQL_DATATYPE_READER_MAP = {
FIELD_TYPE.BIT: convert_bytes_to_int,
FIELD_TYPE.TINY: convert_bytes_to_int,
FIELD_TYPE.SHORT: convert_bytes_to_int,
FIELD_TYPE.LONG: convert_bytes_to_int,
FIELD_TYPE.FLOAT: convert_bytes_to_float,
FIELD_TYPE.DOUBLE: convert_bytes_to_float,
FIELD_TYPE.LONGLONG: convert_bytes_to_long_long,
FIELD_TYPE.INT24: convert_bytes_to_int,
FIELD_TYPE.YEAR: convert_bytes_to_int,
FIELD_TYPE.TIMESTAMP: convert_bytes_to_datetime,
FIELD_TYPE.DATETIME: convert_bytes_to_datetime,
FIELD_TYPE.TIME: convert_bytes_to_time,
FIELD_TYPE.DATE: convert_bytes_to_date,
FIELD_TYPE.NEWDATE: convert_bytes_to_date,
FIELD_TYPE.SET: convert_bytes_to_str,
FIELD_TYPE.BLOB: convert_bytes_to_str,
FIELD_TYPE.TINY_BLOB: convert_bytes_to_str,
FIELD_TYPE.MEDIUM_BLOB: convert_bytes_to_str,
FIELD_TYPE.LONG_BLOB: convert_bytes_to_str,
FIELD_TYPE.STRING: convert_bytes_to_str,
FIELD_TYPE.VAR_STRING: convert_bytes_to_str,
FIELD_TYPE.VARCHAR: convert_bytes_to_str,
FIELD_TYPE.DECIMAL: convert_bytes_to_decimal,
FIELD_TYPE.NEWDECIMAL: convert_bytes_to_decimal,
FIELD_TYPE.ENUM: convert_bytes_to_str,
FIELD_TYPE.GEOMETRY: convert_bytes_to_str
}
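# A hypothetical usage sketch (the field type and packed value are illustrative):
#
#   reader = MYSQL_DATATYPE_READER_MAP[FIELD_TYPE.LONG]
#   number = reader(struct.pack('i', 42))  # -> 42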
|
py | b40e0d2ba3dd9a706cf2ecb16d24eeb1a9964b38 | _base_ = [
'../../_base_/models/detr.py',
# '../../_base_/datasets/mot_challenge.py',
# '../../_base_/default_runtime.py',
#'../../_base_/datasets/mot15-half.py',
]
custom_imports = dict(
imports=[
'mmtrack.models.mocap.trackformer',
# 'mmtrack.models.trackers.trackformer_tracker'
],
allow_failed_imports=False)
model = dict(type='Trackformer',
thres_reid=1,
thres_new=0.9,
thres_cont=0.5
)
dataset_type = 'CocoVideoDataset'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadMultiImagesFromFile', to_float32=True),
dict(type='SeqLoadAnnotations', with_bbox=True, with_track=True),
# dict(type='SeqLoadAnnotations', with_bbox=True),
# dict(type='SeqResize',
# img_scale=(800, 1333),
# share_params=True,
# keep_ratio=True,
# ),
dict(type='SeqNormalize', **img_norm_cfg),
dict(type='SeqPad', size_divisor=32),
dict(type='MatchInstances', skip_nomatch=True),
dict(
type='VideoCollect',
keys=[
'img', 'gt_bboxes', 'gt_labels', 'gt_match_indices',
'gt_instance_ids'
]),
dict(type='SeqDefaultFormatBundle', ref_prefix='ref')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1280, 720),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='VideoCollect', keys=['img'])
])
]
data_root = 'data/iobt/iobt_node_1_zed_20220511/'
classes = ('car', )
data = dict(
samples_per_gpu=8,
workers_per_gpu=1,
train=dict(type=dataset_type,
# visibility_thr=-1,
classes=classes,
ann_file=data_root + 'annotations/train.json',
img_prefix=data_root + 'imgs/train',
ref_img_sampler=dict(
num_ref_imgs=1,
frame_range=10,
filter_key_img=True,
method='uniform'
),
pipeline=train_pipeline
),
val=dict(
type=dataset_type,
classes=classes,
ann_file=data_root + 'annotations/val.json',
img_prefix=data_root + 'imgs/val',
ref_img_sampler=None,
pipeline=test_pipeline),
test=dict(
type=dataset_type,
classes=classes,
ann_file=data_root + 'annotations/train.json',
img_prefix=data_root + 'imgs',
ref_img_sampler=None,
pipeline=test_pipeline)
)
optimizer = dict(
type='AdamW',
#lr=2e-4,
lr=1e-4,
weight_decay=0.0001,
paramwise_cfg=dict(
custom_keys={
'backbone': dict(lr_mult=0.1),
'sampling_offsets': dict(lr_mult=0.1),
'reference_points': dict(lr_mult=0.1)
}))
optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))
lr_config = dict(policy='step', step=[66])
total_epochs = 100
evaluation = dict(metric=['bbox', 'track'], interval=1e8)
find_unused_parameters = True
checkpoint_config = dict(interval=10)
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
])
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
opencv_num_threads = 0
mp_start_method = 'fork'
|
py | b40e0d8ccd1e8b1133b68f6538783a377b41430c | ##
## Copyright (c) 2017-present, Facebook, Inc.
## All rights reserved.
## This source code is licensed under the BSD-style license found in the
## LICENSE file in the root directory of this source tree. An additional grant
## of patent rights can be found in the PATENTS file in the same directory.
##
from parlai.core.agents import Agent
from collections import defaultdict as dd
import spacy
from .models import ObjectChecklistModel, Seq2SeqModel
import numpy as np
from torch.autograd import Variable
import torch
import random
from copy import deepcopy
from projects.mastering_the_dungeon.tasks.graph_world2.graph import DEFAULT_ROOMS, DEFAULT_OBJECTS, DEFAULT_CONTAINERS, DEFAULT_AGENTS, DEDUP_OBJECTS, DEDUP_PROPS
nlp = spacy.load('en')
def parse_action_tuple(insts):
if insts[0] in ['go', 'drop', 'wear', 'wield', 'eat', 'drink', 'remove', 'unwield', 'hit']:
return insts[0], ' '.join(insts[1:])
if insts[0] == 'get':
args = ' '.join(insts[1:]).split(' from ')
if len(args) == 1:
return 'get', args[0]
else:
return 'get', args[0], args[1]
if insts[0] == 'give':
args = ' '.join(insts[1:]).split(' to ')
return 'give', args[0], args[1]
if insts[0] == 'take':
args = ' '.join(insts[1:]).split(' from ')
return 'take', args[0], args[1]
if insts[0] == 'put':
args = ' '.join(insts[1:]).split(' in ')
return 'put', args[0], args[1]
assert False, insts
def reverse_parse_action(action_tuple):
if action_tuple[0] == 'stop':
return 'STOP'
if action_tuple[0] in ['go', 'drop', 'wear', 'wield', 'eat', 'drink', 'remove', 'unwield', 'hit']:
return '{} {}'.format(action_tuple[0], action_tuple[1])
if action_tuple[0] == 'get':
if len(action_tuple) == 2:
return 'get {}'.format(action_tuple[1])
else:
return 'get {} from {}'.format(action_tuple[1], action_tuple[2])
if action_tuple[0] == 'give':
return 'give {} to {}'.format(action_tuple[1], action_tuple[2])
if action_tuple[0] == 'take':
return 'take {} from {}'.format(action_tuple[1], action_tuple[2])
if action_tuple[0] == 'put':
return 'put {} in {}'.format(action_tuple[1], action_tuple[2])
assert False, action_tuple
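# Illustrative round trip (hypothetical action string, not taken from the tests):
#   parse_action_tuple('give apple to troll'.split()) -> ('give', 'apple', 'troll')
#   reverse_parse_action(('give', 'apple', 'troll')) -> 'give apple to troll'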
class DataAgentBase(Agent):
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
if not shared:
self.word2cnt = dd(int)
else:
self.word2cnt = shared['word2cnt']
def _tokenize(self, text, lower=True):
return list(map(lambda x: x.lower_ if lower else x.orth_, list(nlp(text))))
def act(self):
observation = self.observation
tokens = self._tokenize(observation['text'])
for token in tokens:
self.word2cnt[token] += 1
return {}
def build(self):
opt = self.opt
word2cnt = [(k, v) for k, v in self.word2cnt.items()]
word2cnt.sort(key = lambda x: x[1], reverse=True)
word_offset, word2index = 2, {}
word2index['PAD'] = 0
word2index['UNK'] = 1
for i in range(opt['vocab_size'] - word_offset):
if i >= len(word2cnt): break
word = word2cnt[i][0]
word2index[word] = i + word_offset
self.word2index = word2index
self.wordcnt = len(word2index)
def _get_word_index(self, token):
if token in self.word2index:
return self.word2index[token]
return self.word2index['UNK']
def share(self):
shared = super().share()
shared['word2cnt'] = self.word2cnt
return shared
def build_action_id(self):
action2id = {}
offset = 0
for ent in DEFAULT_ROOMS:
action2id[('go', ent)] = offset
offset += 1
for ent in DEDUP_OBJECTS + DEFAULT_CONTAINERS:
action2id[('get', ent)] = offset
offset += 1
action2id[('drop', ent)] = offset
offset += 1
for i, ent in enumerate(DEDUP_OBJECTS):
if DEDUP_PROPS[i] == 'food':
action2id[('eat', ent)] = offset
offset += 1
elif DEDUP_PROPS[i] == 'drink':
action2id[('drink', ent)] = offset
offset += 1
elif DEDUP_PROPS[i] == 'wearable':
action2id[('wear', ent)] = offset
offset += 1
action2id[('remove', ent)] = offset
offset += 1
elif DEDUP_PROPS[i] == 'wieldable':
action2id[('wield', ent)] = offset
offset += 1
action2id[('unwield', ent)] = offset
offset += 1
for ent_i in DEDUP_OBJECTS + DEFAULT_CONTAINERS:
for ent_j in DEFAULT_CONTAINERS:
if ent_i == ent_j: continue
action2id[('put', ent_i, ent_j)] = offset
offset += 1
action2id[('get', ent_i, ent_j)] = offset
offset += 1
for ent_i in DEDUP_OBJECTS + DEFAULT_CONTAINERS:
for ent_j in DEFAULT_AGENTS:
if ent_j == 'dragon': continue
action2id[('give', ent_i, ent_j)] = offset
offset += 1
action2id[('take', ent_i, ent_j)] = offset
offset += 1
for ent in DEFAULT_AGENTS:
if ent != 'dragon':
action2id[('hit', ent)] = offset
offset += 1
action2id[('stop', )] = offset
offset += 1
self.y_dim = offset
print('y_dim = {}'.format(self.y_dim))
self.action2id = action2id
self.id2action = [None for _ in range(self.y_dim)]
for k, v in self.action2id.items():
self.id2action[v] = k
def build_action_key(self):
action_key = np.zeros((self.y_dim, ), dtype=np.int64)
for i in range(self.y_dim):
action_tuple = self.get_action_tuple(i)
if len(action_tuple) <= 1: continue
my_key = action_tuple[1]
action_key[i] = self._get_word_index(my_key.replace(' ', '_'))
self.action_key = action_key
def build_second_action_key(self):
second_action_key = np.zeros((self.y_dim, ), dtype=np.int64)
for i in range(self.y_dim):
action_tuple = self.get_action_tuple(i)
if len(action_tuple) <= 2: continue
my_key = action_tuple[2]
second_action_key[i] = self._get_word_index(my_key.replace(' ', '_'))
self.second_action_key = second_action_key
def build_action_type(self):
action_types = deepcopy(self.ACTION_TYPES)
action_type = np.zeros((self.y_dim, ), dtype=np.int64)
for i in range(self.y_dim):
action_tuple = self.get_action_tuple(i)
my_type = action_tuple[0]
action_type[i] = action_types.index(my_type)
self.action_type = action_type
self.num_actions = len(action_types)
def get_num_actions(self):
return self.num_actions
def build_check_mapping(self):
check_to_key = {}
for i in range(self.y_dim):
action_tuple = self.get_action_tuple(i)
if len(action_tuple) == 1:
check_to_key[action_tuple] = action_tuple[0]
else:
check_to_key[action_tuple] = action_tuple[1]
key_to_check = dd(set)
for k, v in check_to_key.items():
key_to_check[v].add(k)
self.check_to_key, self.key_to_check = check_to_key, key_to_check
check_mapping = np.zeros((self.y_dim, self.y_dim), dtype=np.float32)
for i in range(self.y_dim):
for j in range(self.y_dim):
if self.get_action_tuple(j) in key_to_check[check_to_key[self.get_action_tuple(i)]]:
check_mapping[i, j] = 1.0
self.check_mapping = check_mapping
def get_check_mapping(self):
return self.check_mapping
def get_action_tuple(self, id):
return self.id2action[id]
def get_action_id(self, action):
return self.action2id[action]
def reverse_parse_action(self, action_tuple):
return reverse_parse_action(action_tuple)
def get_mask(self, g, mask):
possible_actions = g.get_possible_actions()
for action in possible_actions:
action_tuple = parse_action_tuple(action.split())
action_id = self.get_action_id(action_tuple)
mask[action_id] = 1.0
mask[self.get_action_id(('stop', ))] = 1.0
class ObjectChecklistDataAgent(DataAgentBase):
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
self.num_rooms = len(DEFAULT_ROOMS)
self.num_objects = len(DEDUP_OBJECTS)
self.num_containers = len(DEFAULT_CONTAINERS)
self.num_npcs = len(DEFAULT_AGENTS) - 1
def build(self):
self.ACTION_TYPES = ['go', 'get', 'drop', 'eat', 'drink', 'wear', 'wield', 'remove', 'unwield', 'give', 'take', 'put', 'hit', 'stop']
super().build()
self.build_action_id()
self.build_action_key()
self.build_second_action_key()
self.build_action_type()
self.build_check_mapping()
def get_room(self, g):
return self._get_word_index(g.node_to_desc_raw(g.node_contained_in('dragon')).replace(' ', '_'))
def _tokenize(self, text, lower=True):
tokenized = ' '.join(list(map(lambda x: x.lower_ if lower else x.orth_, list(nlp(text)))))
for ent in DEFAULT_ROOMS + DEFAULT_CONTAINERS + DEFAULT_AGENTS + DEDUP_OBJECTS:
tokenized = tokenized.replace(ent, ent.replace(' ', '_'))
return tokenized.split()
def get_data(self, observations, datatype='train', assert_=True):
opt = self.opt
batch_size = len(observations)
seq_in, seq_out = 0, 0
tokens_list, inst_list, symb_points_list = [], [], []
for observation in observations:
graph, text, actions = observation['graph'], observation['text'], observation['actions']
tokens_list.append(self._tokenize(text))
seq_in = max(seq_in, len(tokens_list[-1]))
graph = observation['graph']
inst, symb_points = graph.parse(actions)
seq_out = max(seq_out, len(symb_points) - 1 + 1) # +1 for stop
inst_list.append(inst)
symb_points_list.append(symb_points)
if datatype == 'valid':
seq_out = opt['max_seq_out']
seq_in = min(seq_in, opt['max_seq_in'])
y_dim = self.y_dim
x = np.zeros((batch_size, seq_in), dtype=np.int64)
current_room = np.zeros((batch_size, seq_out), dtype=np.int64)
checked = np.zeros((batch_size, seq_out + 1, y_dim), dtype=np.float32)
y = np.zeros((batch_size, seq_out, y_dim), dtype=np.float32)
y_mask = np.zeros((batch_size, seq_out, y_dim), dtype=np.float32)
counter_feat = np.zeros((batch_size, seq_out, y_dim), dtype=np.int64)
graph = observations[0]['graph']
action_key = self.action_key
action_type = self.action_type
second_action_key = self.second_action_key
for i in range(batch_size):
for j, token in enumerate(tokens_list[i]):
if j >= seq_in: break
x[i, j] = self._get_word_index(token)
inst = inst_list[i]
g = observations[i]['graph'].copy()
len_plus_one = len(symb_points_list[i])
action_tuples = []
for j in range(len_plus_one - 1):
k, l = symb_points_list[i][j], symb_points_list[i][j + 1]
action_tuples.append(parse_action_tuple(inst[k: l]))
for j in range(len_plus_one):
if j < len_plus_one - 1:
cur_tuple = action_tuples[j]
y[i, j, self.get_action_id(cur_tuple)] = 1.0
else:
stop_tuple = ('stop', )
y[i, j, self.get_action_id(stop_tuple)] = 1.0
current_room[i, j] = self.get_room(g)
self.get_mask(g, y_mask[i, j])
if j < len_plus_one - 1:
k, l = symb_points_list[i][j], symb_points_list[i][j + 1]
parse_success = g.parse_exec(' '.join(inst[k: l]))
if assert_:
assert parse_success, ' '.join(inst[k: l]) + ' ' + ' '.join(inst)
counter_feat[i, j + 1] = counter_feat[i, j]
cur_tuple = action_tuples[j]
for action_name in self.key_to_check[self.check_to_key[cur_tuple]]:
action_id = self.get_action_id(action_name)
counter_feat[i, j + 1, action_id] += 1
counter_feat = np.clip(counter_feat, None, opt['counter_max'])
return x, action_key, second_action_key, action_type, current_room, checked, y, y_mask, counter_feat
class Seq2SeqDataAgent(DataAgentBase):
def build(self):
self.ACTION_TYPES = ['go', 'get', 'drop', 'eat', 'drink', 'wear', 'wield', 'remove', 'unwield', 'give', 'take', 'put', 'hit', 'stop']
super().build()
self.build_action_id()
def get_data(self, observations, datatype='train', assert_=True):
opt = self.opt
batch_size = len(observations)
seq_in, seq_out = 0, 0
tokens_list, inst_list, symb_points_list = [], [], []
for observation in observations:
graph, text, actions = observation['graph'], observation['text'], observation['actions']
tokens_list.append(self._tokenize(text))
seq_in = max(seq_in, len(tokens_list[-1]))
graph = observation['graph']
inst, symb_points = graph.parse(actions)
seq_out = max(seq_out, len(symb_points) - 1 + 1) # +1 for stop
inst_list.append(inst)
symb_points_list.append(symb_points)
if datatype == 'valid':
seq_out = opt['max_seq_out']
seq_in = min(seq_in, opt['max_seq_in'])
y_dim = self.y_dim
x = np.zeros((batch_size, seq_in), dtype=np.int64)
y = np.zeros((batch_size, seq_out, y_dim), dtype=np.float32)
for i in range(batch_size):
for j, token in enumerate(tokens_list[i]):
if j >= seq_in: break
x[i, j] = self._get_word_index(token)
inst = inst_list[i]
g = observations[i]['graph'].copy()
len_plus_one = len(symb_points_list[i])
action_tuples = []
for j in range(len_plus_one - 1):
k, l = symb_points_list[i][j], symb_points_list[i][j + 1]
action_tuples.append(parse_action_tuple(inst[k: l]))
for j in range(len_plus_one):
if j < len_plus_one - 1:
cur_tuple = action_tuples[j]
y[i, j, self.get_action_id(cur_tuple)] = 1.0
else:
stop_tuple = ('stop', )
y[i, j, self.get_action_id(stop_tuple)] = 1.0
if j < len_plus_one - 1:
k, l = symb_points_list[i][j], symb_points_list[i][j + 1]
parse_success = g.parse_exec(' '.join(inst[k: l]))
if assert_:
assert parse_success, ' '.join(inst[k: l])
return x, y
class ModelAgentBase(Agent):
def __init__(self, opt, shared=None, data_agent=None):
super().__init__(opt, shared)
if not shared:
self.data_agent = data_agent
params = filter(lambda p: p.requires_grad, self.model.parameters())
self.optimizer = torch.optim.Adam(params, lr=opt['lr'])
if opt['cuda']:
self.model.cuda()
else:
self.data_agent = shared['data_agent']
self.model = shared['model']
self.optimizer = shared['optimizer']
def share(self):
shared = super().share()
shared['data_agent'] = self.data_agent
shared['model'] = self.model
shared['optimizer'] = self.optimizer
return shared
def _get_variable(self, np_a, volatile=False):
if self.opt['cuda']:
return Variable(torch.from_numpy(np_a).cuda(), volatile=volatile)
return Variable(torch.from_numpy(np_a), volatile=volatile)
def _get_f1(self, tokens_1, tokens_2):
tokens_1, tokens_2 = set(tokens_1), set(tokens_2)
tp, fp, fn = 0, 0, 0
for token in tokens_2:
if token in tokens_1:
tp += 1
else:
fp += 1
for token in tokens_1:
if token not in tokens_2:
fn += 1
prec = 1.0 * tp / (tp + fp) if tp + fp > 0 else 0.0
recall = 1.0 * tp / (tp + fn) if tp + fn > 0 else 0.0
f1 = 2.0 * prec * recall / (prec + recall) if prec + recall > 0 else 0.0
return f1
def act(self):
return self.batch_act([self.observation])[0]
class ObjectChecklistModelAgent(ModelAgentBase):
def __init__(self, opt, shared=None, data_agent=None):
if not shared:
self.model = ObjectChecklistModel(opt, data_agent)
super().__init__(opt, shared, data_agent)
def batch_act(self, observations):
ori_len = len(observations)
observations = [obv for obv in observations if 'text' in obv]
if self.opt['datatype'] == 'train' or self.opt['datatype'] == 'pretrain':
x, action_key, second_action_key, action_type, current_room, checked, y, y_mask, counter_feat = self.data_agent.get_data(observations)
x, action_key, second_action_key, action_type, current_room, checked, y, y_mask, counter_feat = self._get_variable(x), self._get_variable(action_key), self._get_variable(second_action_key), self._get_variable(action_type), self._get_variable(current_room), self._get_variable(checked), self._get_variable(y), self._get_variable(y_mask), self._get_variable(counter_feat)
loss = self.model.forward_loss(x, action_key, second_action_key, action_type, current_room, checked, y, y_mask, counter_feat)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
reply = [{'loss': loss.data[0]} for _ in range(ori_len)]
return reply
else:
x, action_key, second_action_key, action_type, current_room, checked, y, y_mask, counter_feat = self.data_agent.get_data(observations, 'valid')
x, action_key, second_action_key, action_type, current_room, checked, y, y_mask, counter_feat = self._get_variable(x, True), self._get_variable(action_key, True), self._get_variable(second_action_key, True), self._get_variable(action_type, True), self._get_variable(current_room, True), self._get_variable(checked, True), self._get_variable(y, True), self._get_variable(y_mask, True), self._get_variable(counter_feat)
loss = self.model.forward_loss(x, action_key, second_action_key, action_type, current_room, checked, y, y_mask, counter_feat, False)
reply = [{'loss': 0.0, 'cnt': 0.0, 'acc': 0, 'len': 0, 'f1': 0, 'correct_data': [], 'wrong_data': []} for _ in range(ori_len)]
check_mapping = self.data_agent.get_check_mapping()
check_mapping = self._get_variable(check_mapping, True)
text_out = self.model.forward_predict(x, action_key, second_action_key, action_type, check_mapping, checked, [obv['graph'] for obv in observations], self.data_agent)
for i in range(len(observations)):
data_rep = '{} ||| {} ||| {}'.format(observations[i]['actions'], ' '.join(text_out[i][: -1]), observations[i]['text'])
graph_a, graph_b = observations[i]['graph'].copy(), observations[i]['graph'].copy()
graph_a.parse_exec(observations[i]['actions'])
graph_b.parse_exec(' '.join(text_out[i][: -1]))
if graph_a == graph_b:
reply[i]['acc'] = 1.0
reply[i]['correct_data'].append(data_rep)
else:
reply[i]['wrong_data'].append(data_rep)
inst, symb_points = observations[i]['graph'].parse(observations[i]['actions'])
text_gt = []
for j in range(len(symb_points) - 1):
k, l = symb_points[j], symb_points[j + 1]
text_gt.append(' '.join(inst[k: l]))
reply[i]['f1'] = self._get_f1(text_gt, text_out[i])
reply[i]['loss'] = loss.data[0]
reply[i]['cnt'] = observations[i]['weight']
reply[i]['len'] = len(text_gt)
return reply
class Seq2SeqModelAgent(ModelAgentBase):
def __init__(self, opt, shared=None, data_agent=None):
if not shared:
self.model = Seq2SeqModel(opt, data_agent)
super().__init__(opt, shared, data_agent)
def batch_act(self, observations):
ori_len = len(observations)
observations = [obv for obv in observations if 'text' in obv]
if self.opt['datatype'] == 'train':
x, y = self.data_agent.get_data(observations)
x, y = self._get_variable(x), self._get_variable(y)
loss = self.model.forward_loss(x, y)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
reply = [{}] * ori_len
reply[0]['loss'] = loss.data[0]
return reply
else:
x, y = self.data_agent.get_data(observations, 'valid')
x, y = self._get_variable(x), self._get_variable(y)
loss = self.model.forward_loss(x, y)
reply = [{'loss': 0.0, 'cnt': 0.0, 'acc': 0, 'len': 0, 'f1': 0, 'correct_data': [], 'wrong_data': []} for _ in range(ori_len)]
text_out = self.model.forward_predict(x, [obv['graph'] for obv in observations], self.data_agent)
for i in range(len(observations)):
data_rep = '{} ||| {} ||| {}'.format(observations[i]['actions'], ' '.join(text_out[i][: -1]), observations[i]['text'])
graph_a, graph_b = observations[i]['graph'].copy(), observations[i]['graph'].copy()
graph_a.parse_exec(observations[i]['actions'])
graph_b.parse_exec(' '.join(text_out[i][: -1]))
if graph_a == graph_b:
reply[i]['acc'] = 1.0
reply[i]['correct_data'].append(data_rep)
else:
reply[i]['wrong_data'].append(data_rep)
inst, symb_points = observations[i]['graph'].parse(observations[i]['actions'])
text_gt = []
for j in range(len(symb_points) - 1):
k, l = symb_points[j], symb_points[j + 1]
text_gt.append(' '.join(inst[k: l]))
reply[i]['f1'] = self._get_f1(text_gt, text_out[i])
reply[i]['loss'] = loss.data[0]
reply[i]['cnt'] = observations[i]['weight']
reply[i]['len'] = len(text_gt)
return reply
|
py | b40e0e0af1856277aa744f5f6d619db17eec5e7b | _base_ = './rpn_r50_caffe_fpn_1x_coco.py'
model = dict(
pretrained='open-mmlab://detectron2/resnet101_caffe',
backbone=dict(depth=101))
|
py | b40e0ee0187cf1b1459305acaeed310a4261bbeb | #
# This file is part of do-mpc
#
# do-mpc: An environment for the easy, modular and efficient implementation of
# robust nonlinear model predictive control
#
# Copyright (c) 2014-2019 Sergio Lucia, Alexandru Tatulea-Codrean
# TU Dortmund. All rights reserved
#
# do-mpc is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version.
#
# do-mpc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with do-mpc. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import matplotlib.pyplot as plt
from casadi import *
from casadi.tools import *
import pdb
import sys
sys.path.append('../../')
import do_mpc
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import time
from template_model import template_model
from template_mpc import template_mpc
from template_simulator import template_simulator
""" User settings: """
show_animation = True
store_results = False
"""
Get configured do-mpc modules:
"""
model = template_model()
mpc = template_mpc(model)
simulator = template_simulator(model)
estimator = do_mpc.estimator.StateFeedback(model)
"""
Set initial state
"""
X_s_0 = 1.0 # This is the initial concentration inside the tank [mol/l]
S_s_0 = 0.5 # This is the controlled variable [mol/l]
P_s_0 = 0.0 # This is the initial product concentration [mol/l]
V_s_0 = 120.0 # This is the initial reactor volume [l]
x0 = np.array([X_s_0, S_s_0, P_s_0, V_s_0])
mpc.x0 = x0
simulator.x0 = x0
estimator.x0 = x0
mpc.set_initial_guess()
"""
Setup graphic:
"""
fig, ax, graphics = do_mpc.graphics.default_plot(mpc.data, figsize=(8,5))
plt.ion()
"""
Run MPC main loop:
"""
for k in range(150):
u0 = mpc.make_step(x0)
y_next = simulator.make_step(u0)
x0 = estimator.make_step(y_next)
if show_animation:
graphics.plot_results(t_ind=k)
graphics.plot_predictions(t_ind=k)
graphics.reset_axes()
plt.show()
plt.pause(0.01)
input('Press any key to exit.')
# Store results:
if store_results:
do_mpc.data.save_results([mpc, simulator], 'batch_reactor_MPC') |
py | b40e0fe40f8715247a4108fd9c15d7d0945caf3e | ## -*- coding: utf-8 -*-
##
## Jonathan Salwan - 2014-05-12 - ROPgadget tool
##
## http://twitter.com/JonathanSalwan
## http://shell-storm.org/project/ROPgadget/
##
from ropgadget.loaders.elf import *
from ropgadget.loaders.pe import *
from ropgadget.loaders.raw import *
from ropgadget.loaders.macho import *
from ropgadget.loaders.universal import *
from binascii import unhexlify
class Binary(object):
def __init__(self, options):
self.__fileName = options.binary
self.__rawBinary = None
self.__binary = None
try:
fd = open(self.__fileName, "rb")
self.__rawBinary = fd.read()
fd.close()
except:
print("[Error] Can't open the binary or binary not found")
return None
if options.rawArch and options.rawMode:
self.__binary = Raw(self.__rawBinary, options.rawArch, options.rawMode, options.rawEndian)
elif self.__rawBinary[:4] == unhexlify(b"7f454c46"):
self.__binary = ELF(self.__rawBinary)
elif self.__rawBinary[:2] == unhexlify(b"4d5a"):
self.__binary = PE(self.__rawBinary)
elif self.__rawBinary[:4] == unhexlify(b"cafebabe"):
self.__binary = UNIVERSAL(self.__rawBinary)
elif self.__rawBinary[:4] == unhexlify(b"cefaedfe") or self.__rawBinary[:4] == unhexlify(b"cffaedfe"):
self.__binary = MACHO(self.__rawBinary)
else:
print("[Error] Binary format not supported")
return None
def getFileName(self):
return self.__fileName
def getRawBinary(self):
return self.__rawBinary
def getBinary(self):
return self.__binary
def getEntryPoint(self):
return self.__binary.getEntryPoint()
def getDataSections(self):
return self.__binary.getDataSections()
def getExecSections(self):
return self.__binary.getExecSections()
def getArch(self):
return self.__binary.getArch()
def getArchMode(self):
return self.__binary.getArchMode()
def getEndian(self):
return self.__binary.getEndian()
def getFormat(self):
return self.__binary.getFormat()
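# Example usage (a hypothetical sketch; in ROPgadget the `options` object is built
# by the tool's own argument parser, a SimpleNamespace merely stands in for it):
#
#     from types import SimpleNamespace
#     opts = SimpleNamespace(binary="/bin/ls", rawArch=None, rawMode=None, rawEndian=None)
#     binary = Binary(opts)
#     if binary.getBinary() is not None:
#         print(binary.getFormat(), binary.getArch(), hex(binary.getEntryPoint()))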
|
py | b40e101746cea368a7651ae2c5cd298e6a9a3829 | from collections import deque
import random
import gym
import numpy as np
from tensorflow.keras import models, layers, optimizers
import matplotlib.pyplot as plt
class DQN(object):
def __init__(self):
self.step = 0
        self.update_freq = 200 # how often (in steps) to sync the target network
        self.replay_size = 2000 # replay buffer capacity
self.replay_queue = deque(maxlen=self.replay_size)
self.model = self.create_model()
self.target_model = self.create_model()
def create_model(self):
"""创建一个隐藏层为100的神经网络"""
STATE_DIM, ACTION_DIM = 2, 3
model = models.Sequential([
layers.Dense(100, input_dim=STATE_DIM, activation='relu'),
layers.Dense(ACTION_DIM, activation="linear")
])
model.compile(loss='mean_squared_error',
optimizer=optimizers.Adam(0.001))
return model
def act(self, s, epsilon=0.1):
"""预测动作"""
# 刚开始时,加一点随机成分,产生更多的状态
if np.random.uniform() < epsilon - self.step * 0.0002:
return np.random.choice([0, 1, 2])
return np.argmax(self.model.predict(np.array([s]))[0])
def save_model(self, file_path='MountainCar-v0-dqn.h5'):
print('model saved')
self.model.save(file_path)
def remember(self, s, a, next_s, reward):
"""历史记录,position >= 0.4时给额外的reward,快速收敛"""
if next_s[0] >= 0.4:
reward += 1
self.replay_queue.append((s, a, next_s, reward))
def train(self, batch_size=64, lr=1, factor=0.95):
if len(self.replay_queue) < self.replay_size:
return
self.step += 1
        # Every update_freq steps, copy the model's weights into target_model
if self.step % self.update_freq == 0:
self.target_model.set_weights(self.model.get_weights())
replay_batch = random.sample(self.replay_queue, batch_size)
s_batch = np.array([replay[0] for replay in replay_batch])
next_s_batch = np.array([replay[2] for replay in replay_batch])
Q = self.model.predict(s_batch)
Q_next = self.target_model.predict(next_s_batch)
        # Update the Q values in the training batch using the Bellman formula
for i, replay in enumerate(replay_batch):
_, a, _, reward = replay
Q[i][a] = (1 - lr) * Q[i][a] + lr * (reward + factor * np.amax(Q_next[i]))
        # Feed the batch to the network for training
self.model.fit(s_batch, Q, verbose=0)
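# Note on the update rule used in DQN.train() above (a soft variant of the Q-learning
# target, with the target network providing the bootstrap value):
#     Q(s, a) <- (1 - lr) * Q(s, a) + lr * (reward + factor * max_a' Q_target(s', a'))
# With lr=1 this reduces to the usual target: reward + factor * max_a' Q_target(s', a').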
env = gym.make('MountainCar-v0')
episodes = 1000 # train for 1000 episodes
score_list = [] # record all episode scores
agent = DQN()
for i in range(episodes):
s = env.reset()
score = 0
while True:
a = agent.act(s)
next_s, reward, done, _ = env.step(a)
agent.remember(s, a, next_s, reward)
agent.train()
score += reward
s = next_s
if done:
score_list.append(score)
print('episode:', i, 'score:', score, 'max:', max(score_list), agent.step)
break
    # When the average of the last 10 scores exceeds -160, stop and save the model
if np.mean(score_list[-10:]) > -160:
agent.save_model()
break
env.close()
plt.plot(score_list, color='green')
plt.show() |
py | b40e10914816ceafe653a1532e781cec121c3ba4 |
from morphforge.stdimports import *
from morphforgecontrib.stdimports import StdChlLeak, StdChlAlphaBeta
@cached_functor
def get_Na_Channels(env):
na_state_vars = {"m":
{"alpha": [13.01,0,4,-1.01,-12.56], "beta": [5.73,0,1,9.01,9.69] },
"h":
{"alpha": [0.06,0,0,30.88,26], "beta": [3.06,0,1,-7.09,-10.21]}
}
return env.Channel(
StdChlAlphaBeta,
name="NaChl", ion="na",
equation="m*m*m*h",
conductance=qty("210:nS") / qty("400:um2"),
reversalpotential=qty("50.0:mV"),
statevars=na_state_vars,
)
@cached_functor
def get_Ks_Channels(env):
kf_state_vars = {"ks": {"alpha": [0.2,0,1,-6.96,-7.74 ], "beta": [0.05,0,2,-18.07,6.1 ] } }
return env.Channel(
StdChlAlphaBeta,
name="KsChl", ion="ks",
equation="ks*ks*ks*ks",
conductance=qty("3:nS") / qty("400:um2"),
reversalpotential=qty("-80.0:mV"),
statevars=kf_state_vars,
)
@cached_functor
def get_Kf_Channels(env):
kf_state_vars = {"kf": {"alpha": [ 3.1,0,1,-31.5,-9.3], "beta": [0.44,0,1,4.98,16.19 ] } }
return env.Channel(
StdChlAlphaBeta,
name="KfChl", ion="kf",
equation="kf*kf*kf*kf",
conductance=qty("0.5:nS") / qty("400:um2") ,
reversalpotential=qty("-80.0:mV"),
statevars=kf_state_vars,
)
@cached_functor
def get_Lk_Channels(env):
lk_chl = env.Channel(
StdChlLeak,
name="LkChl",
conductance=qty("3.6765:nS") / qty("400:um2"),
reversalpotential=qty("-51:mV"),
)
return lk_chl
def simulate(current_inj_level):
# Create the environment:
env = NEURONEnvironment()
# Create the simulation:
sim = env.Simulation(name="AA")
# Create a cell:
morphDict1 = {'root': {'length': 20, 'diam': 20, 'id':'soma'} }
morph = MorphologyTree.fromDictionary(morphDict1)
cell = sim.create_cell(name="Cell1", morphology=morph)
lk_chl = get_Lk_Channels(env)
na_chl = get_Na_Channels(env)
potFastChannels = get_Kf_Channels(env)
potSlowChannels = get_Ks_Channels(env)
cell.apply_channel( lk_chl)
cell.apply_channel( na_chl)
cell.apply_channel( potFastChannels)
cell.apply_channel( potSlowChannels)
cell.set_passive( PassiveProperty.SpecificCapacitance, qty('2.0:uF/cm2'))
# Create the stimulus and record the injected current:
cc = sim.create_currentclamp(amp=current_inj_level, dur=qty("100:ms"), delay=qty("100:ms"), cell_location=cell.soma)
sim.record(cc, what=StandardTags.Current)
# Define what to record:
sim.record(cell, what=StandardTags.Voltage, cell_location = cell.soma)
# run the simulation
results = sim.run()
return results
# Display the results:
#results = [simulate(current_inj_level='%d:pA' % i) for i in [50,100,150,200, 250, 300] ]
results = [simulate(current_inj_level='%d:pA' % i) for i in [50] ]
# Create an output .pdf of the first simulation:
SimulationMRedoc.build( results[0] ).to_pdf(__file__ + '.pdf')
TagViewer(results, timerange=(95, 200)*units.ms, show=True)
|
py | b40e10bb5df1470813fbf1c0dfe681005b13bd8c | # -*- coding: utf-8 -*-
"""
The static grains, these are the core, or built in grains.
When grains are loaded they are not loaded in the same way that modules are
loaded, grain functions are detected and executed, the functions MUST
return a dict which will be applied to the main grains dict. This module
will always be executed first, so that any grains loaded here in the core
module can be overwritten just by returning dict keys with the same value
as those returned here
"""
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import datetime
import locale
import logging
import os
import platform
import re
import socket
import sys
import time
import uuid
import zlib
from errno import EACCES, EPERM
import warnings
# Import salt libs
import salt.exceptions
import salt.log
# Solve the Chicken and egg problem where grains need to run before any
# of the modules are loaded and are generally available for any usage.
import salt.modules.cmdmod
import salt.modules.smbios
import salt.utils.args
import salt.utils.dns
import salt.utils.files
import salt.utils.network
import salt.utils.path
import salt.utils.pkg.rpm
import salt.utils.platform
import salt.utils.stringutils
from salt.ext import six
from salt.ext.six.moves import range
# pylint: disable=import-error
try:
import dateutil.tz
_DATEUTIL_TZ = True
except ImportError:
_DATEUTIL_TZ = False
__proxyenabled__ = ["*"]
__FQDN__ = None
# linux_distribution deprecated in py3.7
try:
from platform import linux_distribution as _deprecated_linux_distribution
# Extend the default list of supported distros. This will be used for the
# /etc/DISTRO-release checking that is part of linux_distribution()
from platform import _supported_dists
_supported_dists += (
"arch",
"mageia",
"meego",
"vmware",
"bluewhite64",
"slamd64",
"ovs",
"system",
"mint",
"oracle",
"void",
)
def linux_distribution(**kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return _deprecated_linux_distribution(
supported_dists=_supported_dists, **kwargs
)
except ImportError:
from distro import linux_distribution
# Import salt libs not already imported above
import salt.utils.versions
if salt.utils.platform.is_windows():
import salt.utils.win_osinfo
__salt__ = {
"cmd.run": salt.modules.cmdmod._run_quiet,
"cmd.retcode": salt.modules.cmdmod._retcode_quiet,
"cmd.run_all": salt.modules.cmdmod._run_all_quiet,
"smbios.records": salt.modules.smbios.records,
"smbios.get": salt.modules.smbios.get,
}
log = logging.getLogger(__name__)
HAS_WMI = False
if salt.utils.platform.is_windows():
# attempt to import the python wmi module
# the Windows minion uses WMI for some of its grains
try:
import wmi # pylint: disable=import-error
import salt.utils.winapi
import win32api
import salt.utils.win_reg
HAS_WMI = True
except ImportError:
log.exception(
"Unable to import Python wmi module, some core grains " "will be missing"
)
HAS_UNAME = True
if not hasattr(os, "uname"):
HAS_UNAME = False
_INTERFACES = {}
# Possible value for h_errno defined in netdb.h
HOST_NOT_FOUND = 1
NO_DATA = 4
def _windows_cpudata():
"""
Return some CPU information on Windows minions
"""
# Provides:
# num_cpus
# cpu_model
grains = {}
if "NUMBER_OF_PROCESSORS" in os.environ:
# Cast to int so that the logic isn't broken when used as a
# conditional in templating. Also follows _linux_cpudata()
try:
grains["num_cpus"] = int(os.environ["NUMBER_OF_PROCESSORS"])
except ValueError:
grains["num_cpus"] = 1
grains["cpu_model"] = salt.utils.win_reg.read_value(
hive="HKEY_LOCAL_MACHINE",
key="HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0",
vname="ProcessorNameString",
).get("vdata")
return grains
def _linux_cpudata():
"""
Return some CPU information for Linux minions
"""
# Provides:
# num_cpus
# cpu_model
# cpu_flags
grains = {}
cpuinfo = "/proc/cpuinfo"
# Parse over the cpuinfo file
if os.path.isfile(cpuinfo):
with salt.utils.files.fopen(cpuinfo, 'r') as _fp:
grains['num_cpus'] = 0
for line in _fp:
comps = line.split(":")
if not len(comps) > 1:
continue
key = comps[0].strip()
val = comps[1].strip()
if key == 'processor':
grains['num_cpus'] += 1
elif key == 'model name':
grains['cpu_model'] = val
elif key == 'flags':
grains['cpu_flags'] = val.split()
elif key == 'Features':
grains['cpu_flags'] = val.split()
# ARM support - /proc/cpuinfo
#
# Processor : ARMv6-compatible processor rev 7 (v6l)
# BogoMIPS : 697.95
# Features : swp half thumb fastmult vfp edsp java tls
# CPU implementer : 0x41
# CPU architecture: 7
# CPU variant : 0x0
# CPU part : 0xb76
# CPU revision : 7
#
# Hardware : BCM2708
# Revision : 0002
# Serial : 00000000
elif key == "Processor":
grains["cpu_model"] = val.split("-")[0]
grains["num_cpus"] = 1
if "num_cpus" not in grains:
grains["num_cpus"] = 0
if "cpu_model" not in grains:
grains["cpu_model"] = "Unknown"
if "cpu_flags" not in grains:
grains["cpu_flags"] = []
return grains
def _linux_gpu_data():
"""
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
"""
if __opts__.get("enable_lspci", True) is False:
return {}
if __opts__.get("enable_gpu_grains", True) is False:
return {}
lspci = salt.utils.path.which("lspci")
if not lspci:
log.debug(
"The `lspci` binary is not available on the system. GPU grains "
"will not be available."
)
return {}
# dominant gpu vendors to search for (MUST be lowercase for matching below)
known_vendors = [
"nvidia",
"amd",
"ati",
"intel",
"cirrus logic",
"vmware",
"matrox",
"aspeed",
]
gpu_classes = ("vga compatible controller", "3d controller")
devs = []
try:
lspci_out = __salt__["cmd.run"]("{0} -vmm".format(lspci))
cur_dev = {}
error = False
# Add a blank element to the lspci_out.splitlines() list,
# otherwise the last device is not evaluated as a cur_dev and ignored.
lspci_list = lspci_out.splitlines()
lspci_list.append("")
for line in lspci_list:
# check for record-separating empty lines
if line == "":
if cur_dev.get("Class", "").lower() in gpu_classes:
devs.append(cur_dev)
cur_dev = {}
continue
if re.match(r"^\w+:\s+.*", line):
key, val = line.split(":", 1)
cur_dev[key.strip()] = val.strip()
else:
error = True
log.debug("Unexpected lspci output: '%s'", line)
if error:
log.warning(
"Error loading grains, unexpected linux_gpu_data output, "
"check that you have a valid shell configured and "
"permissions to run lspci command"
)
except OSError:
pass
gpus = []
for gpu in devs:
vendor_strings = re.split('[^A-Za-z0-9]', gpu['Vendor'].lower())
# default vendor to 'unknown', overwrite if we match a known one
vendor = "unknown"
for name in known_vendors:
# search for an 'expected' vendor name in the list of strings
if name in vendor_strings:
vendor = name
break
gpus.append({"vendor": vendor, "model": gpu["Device"]})
grains = {}
grains["num_gpus"] = len(gpus)
grains["gpus"] = gpus
return grains
def _netbsd_gpu_data():
"""
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
"""
known_vendors = [
"nvidia",
"amd",
"ati",
"intel",
"cirrus logic",
"vmware",
"matrox",
"aspeed",
]
gpus = []
try:
pcictl_out = __salt__["cmd.run"]("pcictl pci0 list")
for line in pcictl_out.splitlines():
for vendor in known_vendors:
vendor_match = re.match(
r"[0-9:]+ ({0}) (.+) \(VGA .+\)".format(vendor), line, re.IGNORECASE
)
if vendor_match:
gpus.append(
{
"vendor": vendor_match.group(1),
"model": vendor_match.group(2),
}
)
except OSError:
pass
grains = {}
grains["num_gpus"] = len(gpus)
grains["gpus"] = gpus
return grains
def _osx_gpudata():
"""
num_gpus: int
gpus:
- vendor: nvidia|amd|ati|...
model: string
"""
gpus = []
try:
pcictl_out = __salt__["cmd.run"]("system_profiler SPDisplaysDataType")
for line in pcictl_out.splitlines():
fieldname, _, fieldval = line.partition(": ")
if fieldname.strip() == "Chipset Model":
vendor, _, model = fieldval.partition(" ")
vendor = vendor.lower()
gpus.append({"vendor": vendor, "model": model})
except OSError:
pass
grains = {}
grains["num_gpus"] = len(gpus)
grains["gpus"] = gpus
return grains
def _bsd_cpudata(osdata):
"""
Return CPU information for BSD-like systems
"""
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
sysctl = salt.utils.path.which("sysctl")
arch = salt.utils.path.which("arch")
cmds = {}
if sysctl:
cmds.update(
{
"num_cpus": "{0} -n hw.ncpu".format(sysctl),
"cpuarch": "{0} -n hw.machine".format(sysctl),
"cpu_model": "{0} -n hw.model".format(sysctl),
}
)
if arch and osdata["kernel"] == "OpenBSD":
cmds["cpuarch"] = "{0} -s".format(arch)
if osdata["kernel"] == "Darwin":
cmds["cpu_model"] = "{0} -n machdep.cpu.brand_string".format(sysctl)
cmds["cpu_flags"] = "{0} -n machdep.cpu.features".format(sysctl)
grains = dict([(k, __salt__["cmd.run"](v)) for k, v in six.iteritems(cmds)])
if "cpu_flags" in grains and isinstance(grains["cpu_flags"], six.string_types):
grains["cpu_flags"] = grains["cpu_flags"].split(" ")
if osdata["kernel"] == "NetBSD":
grains["cpu_flags"] = []
for line in __salt__["cmd.run"]("cpuctl identify 0").splitlines():
cpu_match = re.match(r"cpu[0-9]:\ features[0-9]?\ .+<(.+)>", line)
if cpu_match:
flag = cpu_match.group(1).split(",")
grains["cpu_flags"].extend(flag)
if osdata["kernel"] == "FreeBSD" and os.path.isfile("/var/run/dmesg.boot"):
grains["cpu_flags"] = []
        # TODO: at least it needs to be tested for BSDs other than FreeBSD
with salt.utils.files.fopen("/var/run/dmesg.boot", "r") as _fp:
cpu_here = False
for line in _fp:
if line.startswith("CPU: "):
cpu_here = True # starts CPU descr
continue
if cpu_here:
if not line.startswith(" "):
break # game over
if "Features" in line:
start = line.find("<")
end = line.find(">")
if start > 0 and end > 0:
flag = line[start + 1 : end].split(",")
grains["cpu_flags"].extend(flag)
try:
grains["num_cpus"] = int(grains["num_cpus"])
except ValueError:
grains["num_cpus"] = 1
return grains
def _sunos_cpudata():
"""
Return the CPU information for Solaris-like systems
"""
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
grains = {}
grains["cpu_flags"] = []
grains["cpuarch"] = __salt__["cmd.run"]("isainfo -k")
psrinfo = "/usr/sbin/psrinfo 2>/dev/null"
grains["num_cpus"] = len(
__salt__["cmd.run"](psrinfo, python_shell=True).splitlines()
)
kstat_info = "kstat -p cpu_info:*:*:brand"
for line in __salt__["cmd.run"](kstat_info).splitlines():
match = re.match(r"(\w+:\d+:\w+\d+:\w+)\s+(.+)", line)
if match:
grains["cpu_model"] = match.group(2)
isainfo = "isainfo -n -v"
for line in __salt__["cmd.run"](isainfo).splitlines():
match = re.match(r"^\s+(.+)", line)
if match:
cpu_flags = match.group(1).split()
grains["cpu_flags"].extend(cpu_flags)
return grains
def _aix_cpudata():
"""
Return CPU information for AIX systems
"""
# Provides:
# cpuarch
# num_cpus
# cpu_model
# cpu_flags
grains = {}
cmd = salt.utils.path.which("prtconf")
if cmd:
data = __salt__["cmd.run"]("{0}".format(cmd)) + os.linesep
for dest, regstring in (
("cpuarch", r"(?im)^\s*Processor\s+Type:\s+(\S+)"),
("cpu_flags", r"(?im)^\s*Processor\s+Version:\s+(\S+)"),
("cpu_model", r"(?im)^\s*Processor\s+Implementation\s+Mode:\s+(.*)"),
("num_cpus", r"(?im)^\s*Number\s+Of\s+Processors:\s+(\S+)"),
):
for regex in [re.compile(r) for r in [regstring]]:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains[dest] = res.group(1).strip().replace("'", "")
else:
log.error("The 'prtconf' binary was not found in $PATH.")
return grains
def _linux_memdata():
"""
Return the memory information for Linux-like systems
"""
grains = {"mem_total": 0, "swap_total": 0}
meminfo = "/proc/meminfo"
if os.path.isfile(meminfo):
with salt.utils.files.fopen(meminfo, "r") as ifile:
for line in ifile:
comps = line.rstrip("\n").split(":")
if not len(comps) > 1:
continue
if comps[0].strip() == "MemTotal":
# Use floor division to force output to be an integer
grains["mem_total"] = int(comps[1].split()[0]) // 1024
if comps[0].strip() == "SwapTotal":
# Use floor division to force output to be an integer
grains["swap_total"] = int(comps[1].split()[0]) // 1024
return grains
def _osx_memdata():
"""
Return the memory information for BSD-like systems
"""
grains = {"mem_total": 0, "swap_total": 0}
sysctl = salt.utils.path.which("sysctl")
if sysctl:
mem = __salt__["cmd.run"]("{0} -n hw.memsize".format(sysctl))
swap_total = (
__salt__["cmd.run"]("{0} -n vm.swapusage".format(sysctl))
.split()[2]
.replace(",", ".")
)
if swap_total.endswith("K"):
_power = 2 ** 10
elif swap_total.endswith("M"):
_power = 2 ** 20
elif swap_total.endswith("G"):
_power = 2 ** 30
swap_total = float(swap_total[:-1]) * _power
grains["mem_total"] = int(mem) // 1024 // 1024
grains["swap_total"] = int(swap_total) // 1024 // 1024
return grains
def _bsd_memdata(osdata):
"""
Return the memory information for BSD-like systems
"""
grains = {"mem_total": 0, "swap_total": 0}
sysctl = salt.utils.path.which("sysctl")
if sysctl:
mem = __salt__["cmd.run"]("{0} -n hw.physmem".format(sysctl))
if osdata["kernel"] == "NetBSD" and mem.startswith("-"):
mem = __salt__["cmd.run"]("{0} -n hw.physmem64".format(sysctl))
grains["mem_total"] = int(mem) // 1024 // 1024
if osdata["kernel"] in ["OpenBSD", "NetBSD"]:
swapctl = salt.utils.path.which("swapctl")
swap_data = __salt__["cmd.run"]("{0} -sk".format(swapctl))
if swap_data == "no swap devices configured":
swap_total = 0
else:
swap_total = swap_data.split(" ")[1]
else:
swap_total = __salt__["cmd.run"]("{0} -n vm.swap_total".format(sysctl))
grains["swap_total"] = int(swap_total) // 1024 // 1024
return grains
def _sunos_memdata():
"""
Return the memory information for SunOS-like systems
"""
grains = {"mem_total": 0, "swap_total": 0}
prtconf = "/usr/sbin/prtconf 2>/dev/null"
for line in __salt__["cmd.run"](prtconf, python_shell=True).splitlines():
comps = line.split(" ")
if comps[0].strip() == "Memory" and comps[1].strip() == "size:":
grains["mem_total"] = int(comps[2].strip())
swap_cmd = salt.utils.path.which("swap")
swap_data = __salt__["cmd.run"]("{0} -s".format(swap_cmd)).split()
try:
swap_avail = int(swap_data[-2][:-1])
swap_used = int(swap_data[-4][:-1])
swap_total = (swap_avail + swap_used) // 1024
except ValueError:
swap_total = None
grains["swap_total"] = swap_total
return grains
def _aix_memdata():
"""
Return the memory information for AIX systems
"""
grains = {"mem_total": 0, "swap_total": 0}
prtconf = salt.utils.path.which("prtconf")
if prtconf:
for line in __salt__["cmd.run"](prtconf, python_shell=True).splitlines():
comps = [x for x in line.strip().split(" ") if x]
if len(comps) > 2 and "Memory" in comps[0] and "Size" in comps[1]:
grains["mem_total"] = int(comps[2])
break
else:
log.error("The 'prtconf' binary was not found in $PATH.")
swap_cmd = salt.utils.path.which("swap")
if swap_cmd:
swap_data = __salt__["cmd.run"]("{0} -s".format(swap_cmd)).split()
try:
swap_total = (int(swap_data[-2]) + int(swap_data[-6])) * 4
except ValueError:
swap_total = None
grains["swap_total"] = swap_total
else:
log.error("The 'swap' binary was not found in $PATH.")
return grains
def _windows_memdata():
"""
Return the memory information for Windows systems
"""
grains = {"mem_total": 0}
# get the Total Physical memory as reported by msinfo32
tot_bytes = win32api.GlobalMemoryStatusEx()["TotalPhys"]
    # return memory info in megabytes
grains["mem_total"] = int(tot_bytes / (1024 ** 2))
return grains
def _memdata(osdata):
"""
Gather information about the system memory
"""
# Provides:
# mem_total
# swap_total, for supported systems.
grains = {"mem_total": 0}
if osdata["kernel"] == "Linux":
grains.update(_linux_memdata())
elif osdata["kernel"] in ("FreeBSD", "OpenBSD", "NetBSD"):
grains.update(_bsd_memdata(osdata))
elif osdata["kernel"] == "Darwin":
grains.update(_osx_memdata())
elif osdata["kernel"] == "SunOS":
grains.update(_sunos_memdata())
elif osdata["kernel"] == "AIX":
grains.update(_aix_memdata())
elif osdata["kernel"] == "Windows" and HAS_WMI:
grains.update(_windows_memdata())
return grains
def _aix_get_machine_id():
"""
Parse the output of lsattr -El sys0 for os_uuid
"""
grains = {}
cmd = salt.utils.path.which("lsattr")
if cmd:
data = __salt__["cmd.run"]("{0} -El sys0".format(cmd)) + os.linesep
uuid_regexes = [re.compile(r"(?im)^\s*os_uuid\s+(\S+)\s+(.*)")]
for regex in uuid_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains["machine_id"] = res.group(1).strip()
break
else:
log.error("The 'lsattr' binary was not found in $PATH.")
return grains
def _windows_virtual(osdata):
"""
Returns what type of virtual hardware is under the hood, kvm or physical
"""
# Provides:
# virtual
# virtual_subtype
grains = dict()
if osdata["kernel"] != "Windows":
return grains
grains["virtual"] = osdata.get("virtual", "physical")
# It is possible that the 'manufacturer' and/or 'productname' grains
# exist but have a value of None.
manufacturer = osdata.get("manufacturer", "")
if manufacturer is None:
manufacturer = ""
productname = osdata.get("productname", "")
if productname is None:
productname = ""
if "QEMU" in manufacturer:
# FIXME: Make this detect between kvm or qemu
grains["virtual"] = "kvm"
if "Bochs" in manufacturer:
grains["virtual"] = "kvm"
# Product Name: (oVirt) www.ovirt.org
# Red Hat Community virtualization Project based on kvm
elif "oVirt" in productname:
grains["virtual"] = "kvm"
grains["virtual_subtype"] = "oVirt"
# Red Hat Enterprise Virtualization
elif "RHEV Hypervisor" in productname:
grains["virtual"] = "kvm"
grains["virtual_subtype"] = "rhev"
# Product Name: VirtualBox
elif "VirtualBox" in productname:
grains["virtual"] = "VirtualBox"
# Product Name: VMware Virtual Platform
elif "VMware Virtual Platform" in productname:
grains["virtual"] = "VMware"
# Manufacturer: Microsoft Corporation
# Product Name: Virtual Machine
elif "Microsoft" in manufacturer and "Virtual Machine" in productname:
grains["virtual"] = "VirtualPC"
# Manufacturer: Parallels Software International Inc.
elif "Parallels Software" in manufacturer:
grains["virtual"] = "Parallels"
# Apache CloudStack
elif "CloudStack KVM Hypervisor" in productname:
grains["virtual"] = "kvm"
grains["virtual_subtype"] = "cloudstack"
return grains
def _virtual(osdata):
"""
Returns what type of virtual hardware is under the hood, kvm or physical
"""
    # This is going to be a monster. If you are running a VM, please test this
    # grain and submit patches!
# Provides:
# virtual
# virtual_subtype
grains = {"virtual": osdata.get("virtual", "physical")}
# Skip the below loop on platforms which have none of the desired cmds
# This is a temporary measure until we can write proper virtual hardware
# detection.
skip_cmds = ("AIX",)
# list of commands to be executed to determine the 'virtual' grain
_cmds = ["systemd-detect-virt", "virt-what", "dmidecode"]
# test first for virt-what, which covers most of the desired functionality
# on most platforms
if not salt.utils.platform.is_windows() and osdata["kernel"] not in skip_cmds:
if salt.utils.path.which("virt-what"):
_cmds = ["virt-what"]
# Check if enable_lspci is True or False
if __opts__.get("enable_lspci", True) is True:
# /proc/bus/pci does not exists, lspci will fail
if os.path.exists("/proc/bus/pci"):
_cmds += ["lspci"]
# Add additional last resort commands
if osdata["kernel"] in skip_cmds:
_cmds = ()
# Quick backout for BrandZ (Solaris LX Branded zones)
# Don't waste time trying other commands to detect the virtual grain
if (
HAS_UNAME
and osdata["kernel"] == "Linux"
and "BrandZ virtual linux" in os.uname()
):
grains["virtual"] = "zone"
return grains
failed_commands = set()
for command in _cmds:
args = []
if osdata["kernel"] == "Darwin":
command = "system_profiler"
args = ["SPDisplaysDataType"]
elif osdata["kernel"] == "SunOS":
virtinfo = salt.utils.path.which("virtinfo")
if virtinfo:
try:
ret = __salt__["cmd.run_all"]("{0} -a".format(virtinfo))
except salt.exceptions.CommandExecutionError:
if salt.log.is_logging_configured():
failed_commands.add(virtinfo)
else:
if ret["stdout"].endswith("not supported"):
command = "prtdiag"
else:
command = "virtinfo"
else:
command = "prtdiag"
cmd = salt.utils.path.which(command)
if not cmd:
continue
cmd = "{0} {1}".format(cmd, " ".join(args))
try:
ret = __salt__["cmd.run_all"](cmd)
if ret["retcode"] > 0:
if salt.log.is_logging_configured():
# systemd-detect-virt always returns > 0 on non-virtualized
# systems
# prtdiag only works in the global zone, skip if it fails
if (
salt.utils.platform.is_windows()
or "systemd-detect-virt" in cmd
or "prtdiag" in cmd
):
continue
failed_commands.add(command)
continue
except salt.exceptions.CommandExecutionError:
if salt.log.is_logging_configured():
if salt.utils.platform.is_windows():
continue
failed_commands.add(command)
continue
output = ret["stdout"]
if command == "system_profiler":
macoutput = output.lower()
if "0x1ab8" in macoutput:
grains["virtual"] = "Parallels"
if "parallels" in macoutput:
grains["virtual"] = "Parallels"
if "vmware" in macoutput:
grains["virtual"] = "VMware"
if "0x15ad" in macoutput:
grains["virtual"] = "VMware"
if "virtualbox" in macoutput:
grains["virtual"] = "VirtualBox"
# Break out of the loop so the next log message is not issued
break
elif command == "systemd-detect-virt":
if output in (
"qemu",
"kvm",
"oracle",
"xen",
"bochs",
"chroot",
"uml",
"systemd-nspawn",
):
grains["virtual"] = output
break
elif "vmware" in output:
grains["virtual"] = "VMware"
break
elif "microsoft" in output:
grains["virtual"] = "VirtualPC"
break
elif "lxc" in output:
grains["virtual"] = "LXC"
break
elif "systemd-nspawn" in output:
grains["virtual"] = "LXC"
break
elif command == "virt-what":
try:
output = output.splitlines()[-1]
except IndexError:
pass
if output in ("kvm", "qemu", "uml", "xen", "lxc"):
grains["virtual"] = output
break
elif "vmware" in output:
grains["virtual"] = "VMware"
break
elif "parallels" in output:
grains["virtual"] = "Parallels"
break
elif "hyperv" in output:
grains["virtual"] = "HyperV"
break
elif command == "dmidecode":
# Product Name: VirtualBox
if "Vendor: QEMU" in output:
# FIXME: Make this detect between kvm or qemu
grains["virtual"] = "kvm"
if "Manufacturer: QEMU" in output:
grains["virtual"] = "kvm"
if "Vendor: Bochs" in output:
grains["virtual"] = "kvm"
if "Manufacturer: Bochs" in output:
grains["virtual"] = "kvm"
if "BHYVE" in output:
grains["virtual"] = "bhyve"
# Product Name: (oVirt) www.ovirt.org
# Red Hat Community virtualization Project based on kvm
elif "Manufacturer: oVirt" in output:
grains["virtual"] = "kvm"
grains["virtual_subtype"] = "ovirt"
# Red Hat Enterprise Virtualization
elif "Product Name: RHEV Hypervisor" in output:
grains["virtual"] = "kvm"
grains["virtual_subtype"] = "rhev"
elif "VirtualBox" in output:
grains["virtual"] = "VirtualBox"
# Product Name: VMware Virtual Platform
elif "VMware" in output:
grains["virtual"] = "VMware"
# Manufacturer: Microsoft Corporation
# Product Name: Virtual Machine
elif ": Microsoft" in output and "Virtual Machine" in output:
grains["virtual"] = "VirtualPC"
# Manufacturer: Parallels Software International Inc.
elif "Parallels Software" in output:
grains["virtual"] = "Parallels"
elif "Manufacturer: Google" in output:
grains["virtual"] = "kvm"
# Proxmox KVM
elif "Vendor: SeaBIOS" in output:
grains["virtual"] = "kvm"
# Break out of the loop, lspci parsing is not necessary
break
elif command == "lspci":
# dmidecode not available or the user does not have the necessary
# permissions
model = output.lower()
if "vmware" in model:
grains["virtual"] = "VMware"
# 00:04.0 System peripheral: InnoTek Systemberatung GmbH
# VirtualBox Guest Service
elif "virtualbox" in model:
grains["virtual"] = "VirtualBox"
elif "qemu" in model:
grains["virtual"] = "kvm"
elif "virtio" in model:
grains["virtual"] = "kvm"
# Break out of the loop so the next log message is not issued
break
elif command == "prtdiag":
model = output.lower().split("\n")[0]
if "vmware" in model:
grains["virtual"] = "VMware"
elif "virtualbox" in model:
grains["virtual"] = "VirtualBox"
elif "qemu" in model:
grains["virtual"] = "kvm"
elif "joyent smartdc hvm" in model:
grains["virtual"] = "kvm"
break
elif command == "virtinfo":
grains["virtual"] = "LDOM"
break
choices = ("Linux", "HP-UX")
isdir = os.path.isdir
sysctl = salt.utils.path.which("sysctl")
if osdata["kernel"] in choices:
if os.path.isdir("/proc"):
try:
self_root = os.stat("/")
init_root = os.stat("/proc/1/root/.")
if self_root != init_root:
grains["virtual_subtype"] = "chroot"
except (IOError, OSError):
pass
if isdir("/proc/vz"):
if os.path.isfile("/proc/vz/version"):
grains["virtual"] = "openvzhn"
elif os.path.isfile("/proc/vz/veinfo"):
grains["virtual"] = "openvzve"
# a posteriori, it's expected for these to have failed:
failed_commands.discard("lspci")
failed_commands.discard("dmidecode")
# Provide additional detection for OpenVZ
if os.path.isfile("/proc/self/status"):
with salt.utils.files.fopen("/proc/self/status") as status_file:
vz_re = re.compile(r"^envID:\s+(\d+)$")
for line in status_file:
vz_match = vz_re.match(line.rstrip("\n"))
if vz_match and int(vz_match.groups()[0]) != 0:
grains["virtual"] = "openvzve"
elif vz_match and int(vz_match.groups()[0]) == 0:
grains["virtual"] = "openvzhn"
if isdir("/proc/sys/xen") or isdir("/sys/bus/xen") or isdir("/proc/xen"):
if os.path.isfile("/proc/xen/xsd_kva"):
# Tested on CentOS 5.3 / 2.6.18-194.26.1.el5xen
# Tested on CentOS 5.4 / 2.6.18-164.15.1.el5xen
grains["virtual_subtype"] = "Xen Dom0"
else:
if osdata.get("productname", "") == "HVM domU":
# Requires dmidecode!
grains["virtual_subtype"] = "Xen HVM DomU"
elif os.path.isfile("/proc/xen/capabilities") and os.access(
"/proc/xen/capabilities", os.R_OK
):
with salt.utils.files.fopen("/proc/xen/capabilities") as fhr:
if "control_d" not in fhr.read():
# Tested on CentOS 5.5 / 2.6.18-194.3.1.el5xen
grains["virtual_subtype"] = "Xen PV DomU"
else:
# Shouldn't get to this, but just in case
grains["virtual_subtype"] = "Xen Dom0"
# Tested on Fedora 10 / 2.6.27.30-170.2.82 with xen
# Tested on Fedora 15 / 2.6.41.4-1 without running xen
elif isdir("/sys/bus/xen"):
if "xen:" in __salt__["cmd.run"]("dmesg").lower():
grains["virtual_subtype"] = "Xen PV DomU"
elif os.path.isfile("/sys/bus/xen/drivers/xenconsole"):
# An actual DomU will have the xenconsole driver
grains["virtual_subtype"] = "Xen PV DomU"
# If a Dom0 or DomU was detected, obviously this is xen
if "dom" in grains.get("virtual_subtype", "").lower():
grains["virtual"] = "xen"
# Check container type after hypervisors, to avoid variable overwrite on containers running in virtual environment.
if os.path.isfile("/proc/1/cgroup"):
try:
with salt.utils.files.fopen("/proc/1/cgroup", "r") as fhr:
fhr_contents = fhr.read()
if ":/lxc/" in fhr_contents:
grains["virtual"] = "container"
grains["virtual_subtype"] = "LXC"
elif ":/kubepods/" in fhr_contents:
grains["virtual_subtype"] = "kubernetes"
elif ":/libpod_parent/" in fhr_contents:
grains["virtual_subtype"] = "libpod"
else:
if any(
x in fhr_contents
for x in (":/system.slice/docker", ":/docker/", ":/docker-ce/")
):
grains["virtual"] = "container"
grains["virtual_subtype"] = "Docker"
except IOError:
pass
if os.path.isfile("/proc/cpuinfo"):
with salt.utils.files.fopen("/proc/cpuinfo", "r") as fhr:
if "QEMU Virtual CPU" in fhr.read():
grains["virtual"] = "kvm"
if os.path.isfile("/sys/devices/virtual/dmi/id/product_name"):
try:
with salt.utils.files.fopen(
"/sys/devices/virtual/dmi/id/product_name", "r"
) as fhr:
output = salt.utils.stringutils.to_unicode(
fhr.read(), errors="replace"
)
if "VirtualBox" in output:
grains["virtual"] = "VirtualBox"
elif "RHEV Hypervisor" in output:
grains["virtual"] = "kvm"
grains["virtual_subtype"] = "rhev"
elif "oVirt Node" in output:
grains["virtual"] = "kvm"
grains["virtual_subtype"] = "ovirt"
elif "Google" in output:
grains["virtual"] = "gce"
elif "BHYVE" in output:
grains["virtual"] = "bhyve"
except UnicodeDecodeError:
# Some firmwares provide non-valid 'product_name'
# files, ignore them
log.debug(
"The content in /sys/devices/virtual/dmi/id/product_name is not valid"
)
except IOError:
pass
elif osdata["kernel"] == "FreeBSD":
kenv = salt.utils.path.which("kenv")
if kenv:
product = __salt__["cmd.run"]("{0} smbios.system.product".format(kenv))
maker = __salt__["cmd.run"]("{0} smbios.system.maker".format(kenv))
if product.startswith("VMware"):
grains["virtual"] = "VMware"
if product.startswith("VirtualBox"):
grains["virtual"] = "VirtualBox"
if maker.startswith("Xen"):
grains["virtual_subtype"] = "{0} {1}".format(maker, product)
grains["virtual"] = "xen"
if maker.startswith("Microsoft") and product.startswith("Virtual"):
grains["virtual"] = "VirtualPC"
if maker.startswith("OpenStack"):
grains["virtual"] = "OpenStack"
if maker.startswith("Bochs"):
grains["virtual"] = "kvm"
if sysctl:
hv_vendor = __salt__["cmd.run"]("{0} -n hw.hv_vendor".format(sysctl))
model = __salt__["cmd.run"]("{0} -n hw.model".format(sysctl))
jail = __salt__["cmd.run"]("{0} -n security.jail.jailed".format(sysctl))
if "bhyve" in hv_vendor:
grains["virtual"] = "bhyve"
elif "QEMU Virtual CPU" in model:
grains["virtual"] = "kvm"
if jail == "1":
grains["virtual_subtype"] = "jail"
elif osdata["kernel"] == "OpenBSD":
if "manufacturer" in osdata:
if osdata["manufacturer"] in ["QEMU", "Red Hat", "Joyent"]:
grains["virtual"] = "kvm"
if osdata["manufacturer"] == "OpenBSD":
grains["virtual"] = "vmm"
elif osdata["kernel"] == "SunOS":
if grains["virtual"] == "LDOM":
roles = []
for role in ("control", "io", "root", "service"):
subtype_cmd = "{0} -c current get -H -o value {1}-role".format(
cmd, role
)
ret = __salt__["cmd.run_all"]("{0}".format(subtype_cmd))
if ret["stdout"] == "true":
roles.append(role)
if roles:
grains["virtual_subtype"] = roles
else:
# Check if it's a "regular" zone. (i.e. Solaris 10/11 zone)
zonename = salt.utils.path.which("zonename")
if zonename:
zone = __salt__["cmd.run"]("{0}".format(zonename))
if zone != "global":
grains["virtual"] = "zone"
# Check if it's a branded zone (i.e. Solaris 8/9 zone)
if isdir("/.SUNWnative"):
grains["virtual"] = "zone"
elif osdata["kernel"] == "NetBSD":
if sysctl:
if "QEMU Virtual CPU" in __salt__["cmd.run"](
"{0} -n machdep.cpu_brand".format(sysctl)
):
grains["virtual"] = "kvm"
elif "invalid" not in __salt__["cmd.run"](
"{0} -n machdep.xen.suspend".format(sysctl)
):
grains["virtual"] = "Xen PV DomU"
elif "VMware" in __salt__["cmd.run"](
"{0} -n machdep.dmi.system-vendor".format(sysctl)
):
grains["virtual"] = "VMware"
# NetBSD has Xen dom0 support
elif (
__salt__["cmd.run"]("{0} -n machdep.idle-mechanism".format(sysctl))
== "xen"
):
if os.path.isfile("/var/run/xenconsoled.pid"):
grains["virtual_subtype"] = "Xen Dom0"
# If we have a virtual_subtype, we're virtual, but maybe we couldn't
# figure out what specific virtual type we were?
if grains.get("virtual_subtype") and grains["virtual"] == "physical":
grains["virtual"] = "virtual"
for command in failed_commands:
log.info(
"Although '%s' was found in path, the current user "
"cannot execute it. Grains output might not be "
"accurate.",
command,
)
return grains
def _virtual_hv(osdata):
"""
Returns detailed hypervisor information from sysfs
Currently this seems to be used only by Xen
"""
grains = {}
# Bail early if we're not running on Xen
try:
if "xen" not in osdata["virtual"]:
return grains
except KeyError:
return grains
# Try to get the exact hypervisor version from sysfs
try:
version = {}
for fn in ("major", "minor", "extra"):
with salt.utils.files.fopen(
"/sys/hypervisor/version/{}".format(fn), "r"
) as fhr:
version[fn] = salt.utils.stringutils.to_unicode(fhr.read().strip())
grains["virtual_hv_version"] = "{}.{}{}".format(
version["major"], version["minor"], version["extra"]
)
grains["virtual_hv_version_info"] = [
version["major"],
version["minor"],
version["extra"],
]
except (IOError, OSError, KeyError):
pass
# Try to read and decode the supported feature set of the hypervisor
# Based on https://github.com/brendangregg/Misc/blob/master/xen/xen-features.py
# Table data from include/xen/interface/features.h
xen_feature_table = {
0: "writable_page_tables",
1: "writable_descriptor_tables",
2: "auto_translated_physmap",
3: "supervisor_mode_kernel",
4: "pae_pgdir_above_4gb",
5: "mmu_pt_update_preserve_ad",
7: "gnttab_map_avail_bits",
8: "hvm_callback_vector",
9: "hvm_safe_pvclock",
10: "hvm_pirqs",
11: "dom0",
12: "grant_map_identity",
13: "memory_op_vnode_supported",
14: "ARM_SMCCC_supported",
}
try:
with salt.utils.files.fopen("/sys/hypervisor/properties/features", "r") as fhr:
features = salt.utils.stringutils.to_unicode(fhr.read().strip())
enabled_features = []
for bit, feat in six.iteritems(xen_feature_table):
if int(features, 16) & (1 << bit):
enabled_features.append(feat)
grains["virtual_hv_features"] = features
grains["virtual_hv_features_list"] = enabled_features
except (IOError, OSError, KeyError):
pass
return grains
def _ps(osdata):
"""
Return the ps grain
"""
grains = {}
bsd_choices = ("FreeBSD", "NetBSD", "OpenBSD", "MacOS")
if osdata["os"] in bsd_choices:
grains["ps"] = "ps auxwww"
elif osdata["os_family"] == "Solaris":
grains["ps"] = "/usr/ucb/ps auxwww"
elif osdata["os"] == "Windows":
grains["ps"] = "tasklist.exe"
elif osdata.get("virtual", "") == "openvzhn":
grains["ps"] = (
'ps -fH -p $(grep -l "^envID:[[:space:]]*0\\$" '
'/proc/[0-9]*/status | sed -e "s=/proc/\\([0-9]*\\)/.*=\\1=") '
"| awk '{ $7=\"\"; print }'"
)
elif osdata["os_family"] == "AIX":
grains["ps"] = "/usr/bin/ps auxww"
elif osdata["os_family"] == "NILinuxRT":
grains["ps"] = "ps -o user,pid,ppid,tty,time,comm"
else:
grains["ps"] = "ps -efHww"
return grains
def _clean_value(key, val):
"""
Clean out well-known bogus values.
If it isn't clean (for example has value 'None'), return None.
Otherwise, return the original value.
NOTE: This logic also exists in the smbios module. This function is
for use when not using smbios to retrieve the value.
"""
if val is None or not val or re.match("none", val, flags=re.IGNORECASE):
return None
elif "uuid" in key:
        # Try each version (1-4) of RFC4122 to check if it's actually a UUID
for uuidver in range(1, 5):
try:
uuid.UUID(val, version=uuidver)
return val
except ValueError:
continue
log.trace("HW %s value %s is an invalid UUID", key, val.replace("\n", " "))
return None
elif re.search("serial|part|version", key):
# 'To be filled by O.E.M.
# 'Not applicable' etc.
# 'Not specified' etc.
# 0000000, 1234567 etc.
# begone!
if (
re.match(r"^[0]+$", val)
or re.match(r"[0]?1234567[8]?[9]?[0]?", val)
or re.search(
r"sernum|part[_-]?number|specified|filled|applicable",
val,
flags=re.IGNORECASE,
)
):
return None
elif re.search("asset|manufacturer", key):
# AssetTag0. Manufacturer04. Begone.
if re.search(
r"manufacturer|to be filled|available|asset|^no(ne|t)",
val,
flags=re.IGNORECASE,
):
return None
else:
# map unspecified, undefined, unknown & whatever to None
if re.search(r"to be filled", val, flags=re.IGNORECASE) or re.search(
r"un(known|specified)|no(t|ne)? (asset|provided|defined|available|present|specified)",
val,
flags=re.IGNORECASE,
):
return None
return val
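# For example (following the rules above):
#     _clean_value("serialnumber", "To be filled by O.E.M.")  -> None
#     _clean_value("uuid", "not-a-valid-uuid")                 -> None
#     _clean_value("productname", "PowerEdge R740")            -> "PowerEdge R740"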
def _windows_os_release_grain(caption, product_type):
"""
helper function for getting the osrelease grain
:return:
"""
# This creates the osrelease grain based on the Windows Operating
# System Product Name. As long as Microsoft maintains a similar format
# this should be future proof
version = "Unknown"
release = ""
if "Server" in caption:
# Edge case here to handle MS Product that doesn't contain a year
if re.match(r"^Microsoft Hyper-V Server$", caption):
version = "2019"
else:
for item in caption.split(" "):
# If it's all digits, then it's version
if re.match(r"\d+", item):
version = item
# If it starts with R and then numbers, it's the release
# ie: R2
if re.match(r"^R\d+$", item):
release = item
os_release = "{0}Server{1}".format(version, release)
else:
for item in caption.split(" "):
# If it's a number, decimal number, Thin or Vista, then it's the
# version
if re.match(r"^(\d+(\.\d+)?)|Thin|Vista|XP$", item):
version = item
os_release = version
# If the version is still Unknown, revert back to the old way of getting
# the os_release
# https://github.com/saltstack/salt/issues/52339
if os_release in ["Unknown"]:
os_release = platform.release()
server = {
"Vista": "2008Server",
"7": "2008ServerR2",
"8": "2012Server",
"8.1": "2012ServerR2",
"10": "2016Server",
}
# Starting with Python 2.7.12 and 3.5.2 the `platform.uname()`
# function started reporting the Desktop version instead of the
    # Server version on Server versions of Windows, so we need to look
# those up. So, if you find a Server Platform that's a key in the
# server dictionary, then lookup the actual Server Release.
# (Product Type 1 is Desktop, Everything else is Server)
if product_type > 1 and os_release in server:
os_release = server[os_release]
return os_release
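# For example, caption "Microsoft Windows Server 2012 R2 Datacenter" with a server
# product_type yields "2012ServerR2", while caption "Microsoft Windows 10 Pro" with
# product_type 1 (desktop) yields "10".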
def _windows_platform_data():
"""
Use the platform module for as much as we can.
"""
# Provides:
# kernelrelease
# kernelversion
# osversion
# osrelease
# osservicepack
# osmanufacturer
# manufacturer
# productname
# biosversion
# serialnumber
# osfullname
# timezone
# windowsdomain
# windowsdomaintype
# motherboard.productname
# motherboard.serialnumber
# virtual
if not HAS_WMI:
return {}
with salt.utils.winapi.Com():
wmi_c = wmi.WMI()
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394102%28v=vs.85%29.aspx
systeminfo = wmi_c.Win32_ComputerSystem()[0]
# https://msdn.microsoft.com/en-us/library/aa394239(v=vs.85).aspx
osinfo = wmi_c.Win32_OperatingSystem()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394077(v=vs.85).aspx
biosinfo = wmi_c.Win32_BIOS()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394498(v=vs.85).aspx
timeinfo = wmi_c.Win32_TimeZone()[0]
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa394072(v=vs.85).aspx
motherboard = {"product": None, "serial": None}
try:
motherboardinfo = wmi_c.Win32_BaseBoard()[0]
motherboard["product"] = motherboardinfo.Product
motherboard["serial"] = motherboardinfo.SerialNumber
except IndexError:
log.debug("Motherboard info not available on this system")
kernel_version = platform.version()
info = salt.utils.win_osinfo.get_os_version_info()
net_info = salt.utils.win_osinfo.get_join_info()
service_pack = None
if info["ServicePackMajor"] > 0:
service_pack = "".join(["SP", six.text_type(info["ServicePackMajor"])])
os_release = _windows_os_release_grain(
caption=osinfo.Caption, product_type=osinfo.ProductType
)
grains = {
"kernelrelease": _clean_value("kernelrelease", osinfo.Version),
"kernelversion": _clean_value("kernelversion", kernel_version),
"osversion": _clean_value("osversion", osinfo.Version),
"osrelease": _clean_value("osrelease", os_release),
"osservicepack": _clean_value("osservicepack", service_pack),
"osmanufacturer": _clean_value("osmanufacturer", osinfo.Manufacturer),
"manufacturer": _clean_value("manufacturer", systeminfo.Manufacturer),
"productname": _clean_value("productname", systeminfo.Model),
# bios name had a bunch of whitespace appended to it in my testing
# 'PhoenixBIOS 4.0 Release 6.0 '
"biosversion": _clean_value("biosversion", biosinfo.Name.strip()),
"serialnumber": _clean_value("serialnumber", biosinfo.SerialNumber),
"osfullname": _clean_value("osfullname", osinfo.Caption),
"timezone": _clean_value("timezone", timeinfo.Description),
"windowsdomain": _clean_value("windowsdomain", net_info["Domain"]),
"windowsdomaintype": _clean_value(
"windowsdomaintype", net_info["DomainType"]
),
"motherboard": {
"productname": _clean_value(
"motherboard.productname", motherboard["product"]
),
"serialnumber": _clean_value(
"motherboard.serialnumber", motherboard["serial"]
),
},
}
# test for virtualized environments
# I only had VMware available so the rest are unvalidated
if "VRTUAL" in biosinfo.Version: # (not a typo)
grains["virtual"] = "HyperV"
elif "A M I" in biosinfo.Version:
grains["virtual"] = "VirtualPC"
elif "VMware" in systeminfo.Model:
grains["virtual"] = "VMware"
elif "VirtualBox" in systeminfo.Model:
grains["virtual"] = "VirtualBox"
elif "Xen" in biosinfo.Version:
grains["virtual"] = "Xen"
if "HVM domU" in systeminfo.Model:
grains["virtual_subtype"] = "HVM domU"
elif "OpenStack" in systeminfo.Model:
grains["virtual"] = "OpenStack"
elif "AMAZON" in biosinfo.Version:
grains["virtual"] = "EC2"
return grains
def _osx_platform_data():
"""
Additional data for macOS systems
Returns: A dictionary containing values for the following:
- model_name
- boot_rom_version
- smc_version
- system_serialnumber
"""
cmd = "system_profiler SPHardwareDataType"
hardware = __salt__["cmd.run"](cmd)
grains = {}
for line in hardware.splitlines():
field_name, _, field_val = line.partition(": ")
if field_name.strip() == "Model Name":
key = "model_name"
grains[key] = _clean_value(key, field_val)
if field_name.strip() == "Boot ROM Version":
key = "boot_rom_version"
grains[key] = _clean_value(key, field_val)
if field_name.strip() == "SMC Version (system)":
key = "smc_version"
grains[key] = _clean_value(key, field_val)
if field_name.strip() == "Serial Number (system)":
key = "system_serialnumber"
grains[key] = _clean_value(key, field_val)
return grains
def id_():
"""
Return the id
"""
return {"id": __opts__.get("id", "")}
_REPLACE_LINUX_RE = re.compile(r"\W(?:gnu/)?linux", re.IGNORECASE)
# This maps (at most) the first ten characters (no spaces, lowercased) of
# 'osfullname' to the 'os' grain that Salt traditionally uses.
# Please see os_data() and _supported_dists.
# If your system is not detecting properly it likely needs an entry here.
_OS_NAME_MAP = {
"redhatente": "RedHat",
"gentoobase": "Gentoo",
"archarm": "Arch ARM",
"arch": "Arch",
"debian": "Debian",
"raspbian": "Raspbian",
"fedoraremi": "Fedora",
"chapeau": "Chapeau",
"korora": "Korora",
"amazonami": "Amazon",
"alt": "ALT",
"enterprise": "OEL",
"oracleserv": "OEL",
"cloudserve": "CloudLinux",
"cloudlinux": "CloudLinux",
"pidora": "Fedora",
"scientific": "ScientificLinux",
"synology": "Synology",
"nilrt": "NILinuxRT",
"poky": "Poky",
"manjaro": "Manjaro",
"manjarolin": "Manjaro",
"univention": "Univention",
"antergos": "Antergos",
"sles": "SUSE",
"void": "Void",
"slesexpand": "RES",
"linuxmint": "Mint",
"neon": "KDE neon",
}
# Map the 'os' grain to the 'os_family' grain
# These should always be capitalized entries as the lookup comes
# post-_OS_NAME_MAP. If your system is having trouble with detection, please
# make sure that the 'os' grain is capitalized and working correctly first.
_OS_FAMILY_MAP = {
"Ubuntu": "Debian",
"Fedora": "RedHat",
"Chapeau": "RedHat",
"Korora": "RedHat",
"FedBerry": "RedHat",
"CentOS": "RedHat",
"GoOSe": "RedHat",
"Scientific": "RedHat",
"Amazon": "RedHat",
"CloudLinux": "RedHat",
"OVS": "RedHat",
"OEL": "RedHat",
"XCP": "RedHat",
"XCP-ng": "RedHat",
"XenServer": "RedHat",
"RES": "RedHat",
"Sangoma": "RedHat",
"Mandrake": "Mandriva",
"ESXi": "VMware",
"Mint": "Debian",
"VMwareESX": "VMware",
"Bluewhite64": "Bluewhite",
"Slamd64": "Slackware",
"SLES": "Suse",
"SUSE Enterprise Server": "Suse",
"SUSE Enterprise Server": "Suse",
"SLED": "Suse",
"openSUSE": "Suse",
"SUSE": "Suse",
"openSUSE Leap": "Suse",
"openSUSE Tumbleweed": "Suse",
"SLES_SAP": "Suse",
"Solaris": "Solaris",
"SmartOS": "Solaris",
"OmniOS": "Solaris",
"OpenIndiana Development": "Solaris",
"OpenIndiana": "Solaris",
"OpenSolaris Development": "Solaris",
"OpenSolaris": "Solaris",
"Oracle Solaris": "Solaris",
"Arch ARM": "Arch",
"Manjaro": "Arch",
"Antergos": "Arch",
"ALT": "RedHat",
"Trisquel": "Debian",
"GCEL": "Debian",
"Linaro": "Debian",
"elementary OS": "Debian",
"elementary": "Debian",
"Univention": "Debian",
"ScientificLinux": "RedHat",
"Raspbian": "Debian",
"Devuan": "Debian",
"antiX": "Debian",
"Kali": "Debian",
"neon": "Debian",
"Cumulus": "Debian",
"Deepin": "Debian",
"NILinuxRT": "NILinuxRT",
"KDE neon": "Debian",
"Void": "Void",
"IDMS": "Debian",
"Funtoo": "Gentoo",
"AIX": "AIX",
"TurnKey": "Debian",
}
# Matches any possible format:
# DISTRIB_ID="Ubuntu"
# DISTRIB_ID='Mageia'
# DISTRIB_ID=Fedora
# DISTRIB_RELEASE='10.10'
# DISTRIB_CODENAME='squeeze'
# DISTRIB_DESCRIPTION='Ubuntu 10.10'
_LSB_REGEX = re.compile(
(
"^(DISTRIB_(?:ID|RELEASE|CODENAME|DESCRIPTION))=(?:'|\")?"
"([\\w\\s\\.\\-_]+)(?:'|\")?"
)
)
def _linux_bin_exists(binary):
"""
Does a binary exist in linux (depends on which, type, or whereis)
"""
for search_cmd in ("which", "type -ap"):
try:
return __salt__["cmd.retcode"]("{0} {1}".format(search_cmd, binary)) == 0
except salt.exceptions.CommandExecutionError:
pass
try:
return (
len(
__salt__["cmd.run_all"]("whereis -b {0}".format(binary))[
"stdout"
].split()
)
> 1
)
except salt.exceptions.CommandExecutionError:
return False
def _get_interfaces():
"""
Provide a dict of the connected interfaces and their ip addresses
"""
global _INTERFACES
if not _INTERFACES:
_INTERFACES = salt.utils.network.interfaces()
return _INTERFACES
def _parse_lsb_release():
ret = {}
try:
log.trace("Attempting to parse /etc/lsb-release")
with salt.utils.files.fopen("/etc/lsb-release") as ifile:
for line in ifile:
try:
key, value = _LSB_REGEX.match(line.rstrip("\n")).groups()[:2]
except AttributeError:
pass
else:
# Adds lsb_distrib_{id,release,codename,description}
ret["lsb_{0}".format(key.lower())] = value.rstrip()
except (IOError, OSError) as exc:
log.trace("Failed to parse /etc/lsb-release: %s", exc)
return ret
def _parse_os_release(*os_release_files):
"""
Parse os-release and return a parameter dictionary
See http://www.freedesktop.org/software/systemd/man/os-release.html
for specification of the file format.
"""
ret = {}
for filename in os_release_files:
try:
with salt.utils.files.fopen(filename) as ifile:
regex = re.compile("^([\\w]+)=(?:'|\")?(.*?)(?:'|\")?$")
for line in ifile:
match = regex.match(line.strip())
if match:
# Shell special characters ("$", quotes, backslash,
# backtick) are escaped with backslashes
ret[match.group(1)] = re.sub(
r'\\([$"\'\\`])', r"\1", match.group(2)
)
break
except (IOError, OSError):
pass
return ret
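# For example, a hypothetical /etc/os-release containing
#     NAME="Ubuntu"
#     VERSION_ID="20.04"
# would yield {"NAME": "Ubuntu", "VERSION_ID": "20.04"}; note that only the first
# readable file in os_release_files is parsed because of the break above.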
def _parse_cpe_name(cpe):
"""
Parse CPE_NAME data from the os-release
Info: https://csrc.nist.gov/projects/security-content-automation-protocol/scap-specifications/cpe
Note: cpe:2.3:part:vendor:product:version:update:edition:lang:sw_edition:target_sw:target_hw:other
however some OS's do not have the full 13 elements, for example:
CPE_NAME="cpe:2.3:o:amazon:amazon_linux:2"
:param cpe:
:return:
"""
part = {
"o": "operating system",
"h": "hardware",
"a": "application",
}
ret = {}
cpe = (cpe or "").split(":")
if len(cpe) > 4 and cpe[0] == "cpe":
if cpe[1].startswith("/"): # WFN to URI
ret["vendor"], ret["product"], ret["version"] = cpe[2:5]
ret["phase"] = cpe[5] if len(cpe) > 5 else None
ret["part"] = part.get(cpe[1][1:])
elif len(cpe) == 6 and cpe[1] == "2.3": # WFN to a string
ret["vendor"], ret["product"], ret["version"] = [
x if x != "*" else None for x in cpe[3:6]
]
ret["phase"] = None
ret["part"] = part.get(cpe[2])
elif len(cpe) > 7 and len(cpe) <= 13 and cpe[1] == "2.3": # WFN to a string
ret["vendor"], ret["product"], ret["version"], ret["phase"] = [
x if x != "*" else None for x in cpe[3:7]
]
ret["part"] = part.get(cpe[2])
return ret
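# For example:
#     _parse_cpe_name("cpe:2.3:o:amazon:amazon_linux:2")
# returns {"vendor": "amazon", "product": "amazon_linux", "version": "2",
#          "phase": None, "part": "operating system"}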
def os_data():
"""
Return grains pertaining to the operating system
"""
grains = {
"num_gpus": 0,
"gpus": [],
}
# Windows Server 2008 64-bit
# ('Windows', 'MINIONNAME', '2008ServerR2', '6.1.7601', 'AMD64',
# 'Intel64 Fam ily 6 Model 23 Stepping 6, GenuineIntel')
# Ubuntu 10.04
# ('Linux', 'MINIONNAME', '2.6.32-38-server',
# '#83-Ubuntu SMP Wed Jan 4 11:26:59 UTC 2012', 'x86_64', '')
# pylint: disable=unpacking-non-sequence
(
grains["kernel"],
grains["nodename"],
grains["kernelrelease"],
grains["kernelversion"],
grains["cpuarch"],
_,
) = platform.uname()
# pylint: enable=unpacking-non-sequence
if salt.utils.platform.is_proxy():
grains["kernel"] = "proxy"
grains["kernelrelease"] = "proxy"
grains["kernelversion"] = "proxy"
grains["osrelease"] = "proxy"
grains["os"] = "proxy"
grains["os_family"] = "proxy"
grains["osfullname"] = "proxy"
elif salt.utils.platform.is_windows():
grains["os"] = "Windows"
grains["os_family"] = "Windows"
grains.update(_memdata(grains))
grains.update(_windows_platform_data())
grains.update(_windows_cpudata())
grains.update(_windows_virtual(grains))
grains.update(_ps(grains))
if "Server" in grains["osrelease"]:
osrelease_info = grains["osrelease"].split("Server", 1)
osrelease_info[1] = osrelease_info[1].lstrip("R")
else:
osrelease_info = grains["osrelease"].split(".")
for idx, value in enumerate(osrelease_info):
if not value.isdigit():
continue
osrelease_info[idx] = int(value)
grains["osrelease_info"] = tuple(osrelease_info)
grains["osfinger"] = "{os}-{ver}".format(
os=grains["os"], ver=grains["osrelease"]
)
grains["init"] = "Windows"
return grains
elif salt.utils.platform.is_linux():
# Add SELinux grain, if you have it
if _linux_bin_exists("selinuxenabled"):
log.trace("Adding selinux grains")
grains["selinux"] = {}
grains["selinux"]["enabled"] = (
__salt__["cmd.retcode"]("selinuxenabled") == 0
)
if _linux_bin_exists("getenforce"):
grains["selinux"]["enforced"] = __salt__["cmd.run"](
"getenforce"
).strip()
# Add systemd grain, if you have it
if _linux_bin_exists("systemctl") and _linux_bin_exists("localectl"):
log.trace("Adding systemd grains")
grains["systemd"] = {}
systemd_info = __salt__["cmd.run"]("systemctl --version").splitlines()
grains["systemd"]["version"] = systemd_info[0].split()[1]
grains["systemd"]["features"] = systemd_info[1]
# Add init grain
grains["init"] = "unknown"
log.trace("Adding init grain")
try:
os.stat("/run/systemd/system")
grains["init"] = "systemd"
except (OSError, IOError):
try:
with salt.utils.files.fopen("/proc/1/cmdline") as fhr:
init_cmdline = fhr.read().replace("\x00", " ").split()
except (IOError, OSError):
pass
else:
try:
init_bin = salt.utils.path.which(init_cmdline[0])
except IndexError:
                    # Empty init_cmdline
init_bin = None
log.warning("Unable to fetch data from /proc/1/cmdline")
if init_bin is not None and init_bin.endswith("bin/init"):
supported_inits = (b"upstart", b"sysvinit", b"systemd")
edge_len = max(len(x) for x in supported_inits) - 1
try:
buf_size = __opts__["file_buffer_size"]
except KeyError:
# Default to the value of file_buffer_size for the minion
buf_size = 262144
try:
with salt.utils.files.fopen(init_bin, "rb") as fp_:
edge = b""
buf = fp_.read(buf_size).lower()
while buf:
buf = edge + buf
for item in supported_inits:
if item in buf:
if six.PY3:
item = item.decode("utf-8")
grains["init"] = item
buf = b""
break
edge = buf[-edge_len:]
buf = fp_.read(buf_size).lower()
except (IOError, OSError) as exc:
log.error(
"Unable to read from init_bin (%s): %s", init_bin, exc
)
elif salt.utils.path.which("supervisord") in init_cmdline:
grains["init"] = "supervisord"
elif salt.utils.path.which("dumb-init") in init_cmdline:
# https://github.com/Yelp/dumb-init
grains["init"] = "dumb-init"
elif salt.utils.path.which("tini") in init_cmdline:
# https://github.com/krallin/tini
grains["init"] = "tini"
elif init_cmdline == ["runit"]:
grains["init"] = "runit"
elif "/sbin/my_init" in init_cmdline:
                    # Phusion Base docker containers use runit for service
                    # management, but my_init as PID 1
grains["init"] = "runit"
else:
log.debug(
"Could not determine init system from command line: (%s)",
" ".join(init_cmdline),
)
# Add lsb grains on any distro with lsb-release. Note that this import
# can fail on systems with lsb-release installed if the system package
# does not install the python package for the python interpreter used by
# Salt (i.e. python2 or python3)
try:
log.trace("Getting lsb_release distro information")
import lsb_release # pylint: disable=import-error
release = lsb_release.get_distro_information()
for key, value in six.iteritems(release):
key = key.lower()
lsb_param = "lsb_{0}{1}".format(
"" if key.startswith("distrib_") else "distrib_", key
)
grains[lsb_param] = value
# Catch a NameError to workaround possible breakage in lsb_release
# See https://github.com/saltstack/salt/issues/37867
except (ImportError, NameError):
# if the python library isn't available, try to parse
# /etc/lsb-release using regex
log.trace("lsb_release python bindings not available")
grains.update(_parse_lsb_release())
if grains.get("lsb_distrib_description", "").lower().startswith("antergos"):
# Antergos incorrectly configures their /etc/lsb-release,
# setting the DISTRIB_ID to "Arch". This causes the "os" grain
# to be incorrectly set to "Arch".
grains["osfullname"] = "Antergos Linux"
elif "lsb_distrib_id" not in grains:
log.trace("Failed to get lsb_distrib_id, trying to parse os-release")
os_release = _parse_os_release("/etc/os-release", "/usr/lib/os-release")
if os_release:
if "NAME" in os_release:
grains["lsb_distrib_id"] = os_release["NAME"].strip()
if "VERSION_ID" in os_release:
grains["lsb_distrib_release"] = os_release["VERSION_ID"]
if "VERSION_CODENAME" in os_release:
grains["lsb_distrib_codename"] = os_release["VERSION_CODENAME"]
elif "PRETTY_NAME" in os_release:
codename = os_release["PRETTY_NAME"]
# https://github.com/saltstack/salt/issues/44108
if os_release["ID"] == "debian":
codename_match = re.search(r"\((\w+)\)$", codename)
if codename_match:
codename = codename_match.group(1)
grains["lsb_distrib_codename"] = codename
if "CPE_NAME" in os_release:
cpe = _parse_cpe_name(os_release["CPE_NAME"])
if not cpe:
log.error("Broken CPE_NAME format in /etc/os-release!")
elif cpe.get("vendor", "").lower() in ["suse", "opensuse"]:
grains["os"] = "SUSE"
# openSUSE `osfullname` grain normalization
if os_release.get("NAME") == "openSUSE Leap":
grains["osfullname"] = "Leap"
elif os_release.get("VERSION") == "Tumbleweed":
grains["osfullname"] = os_release["VERSION"]
# Override VERSION_ID, if CPE_NAME around
if (
cpe.get("version") and cpe.get("vendor") == "opensuse"
): # Keep VERSION_ID for SLES
grains["lsb_distrib_release"] = cpe["version"]
elif os.path.isfile("/etc/SuSE-release"):
log.trace("Parsing distrib info from /etc/SuSE-release")
grains["lsb_distrib_id"] = "SUSE"
version = ""
patch = ""
with salt.utils.files.fopen("/etc/SuSE-release") as fhr:
for line in fhr:
if "enterprise" in line.lower():
grains["lsb_distrib_id"] = "SLES"
grains["lsb_distrib_codename"] = re.sub(
r"\(.+\)", "", line
).strip()
elif "version" in line.lower():
version = re.sub(r"[^0-9]", "", line)
elif "patchlevel" in line.lower():
patch = re.sub(r"[^0-9]", "", line)
grains["lsb_distrib_release"] = version
if patch:
grains["lsb_distrib_release"] += "." + patch
patchstr = "SP" + patch
if (
grains["lsb_distrib_codename"]
and patchstr not in grains["lsb_distrib_codename"]
):
grains["lsb_distrib_codename"] += " " + patchstr
if not grains.get("lsb_distrib_codename"):
grains["lsb_distrib_codename"] = "n.a"
elif os.path.isfile("/etc/altlinux-release"):
log.trace("Parsing distrib info from /etc/altlinux-release")
# ALT Linux
grains["lsb_distrib_id"] = "altlinux"
with salt.utils.files.fopen("/etc/altlinux-release") as ifile:
# This file is symlinked to from:
# /etc/fedora-release
# /etc/redhat-release
# /etc/system-release
for line in ifile:
# ALT Linux Sisyphus (unstable)
comps = line.split()
if comps[0] == 'ALT':
grains['lsb_distrib_release'] = comps[2]
grains['lsb_distrib_codename'] = \
comps[3].replace('(', '').replace(')', '')
elif os.path.isfile('/etc/centos-release'):
log.trace('Parsing distrib info from /etc/centos-release')
# Maybe CentOS Linux; could also be SUSE Expanded Support.
# SUSE ES has both, centos-release and redhat-release.
if os.path.isfile('/etc/redhat-release'):
with salt.utils.files.fopen('/etc/redhat-release') as ifile:
for line in ifile:
if "red hat enterprise linux server" in line.lower():
                                # This is a SUSE Expanded Support RHEL installation
grains['lsb_distrib_id'] = 'RedHat'
break
grains.setdefault('lsb_distrib_id', 'CentOS')
with salt.utils.files.fopen('/etc/centos-release') as ifile:
for line in ifile:
# Need to pull out the version and codename
# in the case of custom content in /etc/centos-release
find_release = re.compile(r"\d+\.\d+")
find_codename = re.compile(r"(?<=\()(.*?)(?=\))")
release = find_release.search(line)
codename = find_codename.search(line)
if release is not None:
grains["lsb_distrib_release"] = release.group()
if codename is not None:
grains["lsb_distrib_codename"] = codename.group()
elif os.path.isfile("/etc.defaults/VERSION") and os.path.isfile(
"/etc.defaults/synoinfo.conf"
):
grains["osfullname"] = "Synology"
log.trace(
"Parsing Synology distrib info from /etc/.defaults/VERSION"
)
with salt.utils.files.fopen("/etc.defaults/VERSION", "r") as fp_:
synoinfo = {}
for line in fp_:
try:
key, val = line.rstrip("\n").split("=")
except ValueError:
continue
if key in ("majorversion", "minorversion", "buildnumber"):
synoinfo[key] = val.strip('"')
if len(synoinfo) != 3:
log.warning(
"Unable to determine Synology version info. "
"Please report this, as it is likely a bug."
)
else:
grains[
"osrelease"
] = "{majorversion}.{minorversion}-{buildnumber}".format(
**synoinfo
)
# Use the already intelligent platform module to get distro info
# (though apparently it's not intelligent enough to strip quotes)
log.trace(
'Getting OS name, release, and codename from '
'distro.linux_distribution()'
)
(osname, osrelease, oscodename) = [
x.strip('"').strip("'") for x in linux_distribution()
]
# Try to assign these three names based on the lsb info, they tend to
# be more accurate than what python gets from /etc/DISTRO-release.
# It's worth noting that Ubuntu has patched their Python distribution
# so that linux_distribution() does the /etc/lsb-release parsing, but
        # we do it anyway here for the sake of full portability.
if "osfullname" not in grains:
# If NI Linux RT distribution, set the grains['osfullname'] to 'nilrt'
if grains.get("lsb_distrib_id", "").lower().startswith("nilrt"):
grains["osfullname"] = "nilrt"
else:
grains["osfullname"] = grains.get("lsb_distrib_id", osname).strip()
if "osrelease" not in grains:
# NOTE: This is a workaround for CentOS 7 os-release bug
# https://bugs.centos.org/view.php?id=8359
# /etc/os-release contains no minor distro release number so we fall back to parse
# /etc/centos-release file instead.
            # Commit introducing this comment should be reverted after the upstream bug fix is released.
if "CentOS Linux 7" in grains.get("lsb_distrib_codename", ""):
grains.pop("lsb_distrib_release", None)
grains["osrelease"] = grains.get("lsb_distrib_release", osrelease).strip()
grains["oscodename"] = (
grains.get("lsb_distrib_codename", "").strip() or oscodename
)
if "Red Hat" in grains["oscodename"]:
grains["oscodename"] = oscodename
distroname = _REPLACE_LINUX_RE.sub("", grains["osfullname"]).strip()
# return the first ten characters with no spaces, lowercased
shortname = distroname.replace(" ", "").lower()[:10]
# this maps the long names from the /etc/DISTRO-release files to the
# traditional short names that Salt has used.
if "os" not in grains:
grains["os"] = _OS_NAME_MAP.get(shortname, distroname)
grains.update(_linux_cpudata())
grains.update(_linux_gpu_data())
elif grains["kernel"] == "SunOS":
if salt.utils.platform.is_smartos():
# See https://github.com/joyent/smartos-live/issues/224
if HAS_UNAME:
uname_v = os.uname()[3] # format: joyent_20161101T004406Z
else:
uname_v = os.name
uname_v = uname_v[uname_v.index("_") + 1 :]
grains["os"] = grains["osfullname"] = "SmartOS"
# store a parsed version of YYYY.MM.DD as osrelease
grains["osrelease"] = ".".join(
[
uname_v.split("T")[0][0:4],
uname_v.split("T")[0][4:6],
uname_v.split("T")[0][6:8],
]
)
            # store an untouched copy of the timestamp in osrelease_stamp
grains["osrelease_stamp"] = uname_v
elif os.path.isfile("/etc/release"):
with salt.utils.files.fopen("/etc/release", "r") as fp_:
rel_data = fp_.read()
try:
release_re = re.compile(
r"((?:Open|Oracle )?Solaris|OpenIndiana|OmniOS) (Development)?"
r"\s*(\d+\.?\d*|v\d+)\s?[A-Z]*\s?(r\d+|\d+\/\d+|oi_\S+|snv_\S+)?"
)
(
osname,
development,
osmajorrelease,
osminorrelease,
) = release_re.search(rel_data).groups()
except AttributeError:
# Set a blank osrelease grain and fallback to 'Solaris'
# as the 'os' grain.
grains["os"] = grains["osfullname"] = "Solaris"
grains["osrelease"] = ""
else:
if development is not None:
osname = " ".join((osname, development))
if HAS_UNAME:
uname_v = os.uname()[3]
else:
uname_v = os.name
grains["os"] = grains["osfullname"] = osname
if osname in ["Oracle Solaris"] and uname_v.startswith(
osmajorrelease
):
                    # Oracle Solaris 11 and up have the minor version in uname
grains["osrelease"] = uname_v
elif osname in ["OmniOS"]:
# OmniOS
osrelease = []
osrelease.append(osmajorrelease[1:])
osrelease.append(osminorrelease[1:])
grains["osrelease"] = ".".join(osrelease)
grains["osrelease_stamp"] = uname_v
else:
# Sun Solaris 10 and earlier/comparable
osrelease = []
osrelease.append(osmajorrelease)
if osminorrelease:
osrelease.append(osminorrelease)
grains["osrelease"] = ".".join(osrelease)
grains["osrelease_stamp"] = uname_v
grains.update(_sunos_cpudata())
elif grains["kernel"] == "VMkernel":
grains["os"] = "ESXi"
elif grains["kernel"] == "Darwin":
osrelease = __salt__["cmd.run"]("sw_vers -productVersion")
osname = __salt__["cmd.run"]("sw_vers -productName")
osbuild = __salt__["cmd.run"]("sw_vers -buildVersion")
grains["os"] = "MacOS"
grains["os_family"] = "MacOS"
grains["osfullname"] = "{0} {1}".format(osname, osrelease)
grains["osrelease"] = osrelease
grains["osbuild"] = osbuild
grains["init"] = "launchd"
grains.update(_bsd_cpudata(grains))
grains.update(_osx_gpudata())
grains.update(_osx_platform_data())
elif grains["kernel"] == "AIX":
osrelease = __salt__["cmd.run"]("oslevel")
osrelease_techlevel = __salt__["cmd.run"]("oslevel -r")
osname = __salt__["cmd.run"]("uname")
grains["os"] = "AIX"
grains["osfullname"] = osname
grains["osrelease"] = osrelease
grains["osrelease_techlevel"] = osrelease_techlevel
grains.update(_aix_cpudata())
else:
grains["os"] = grains["kernel"]
if grains["kernel"] == "FreeBSD":
grains["osfullname"] = grains["os"]
try:
grains["osrelease"] = __salt__["cmd.run"]("freebsd-version -u").split("-")[
0
]
except salt.exceptions.CommandExecutionError:
# freebsd-version was introduced in 10.0.
# derive osrelease from kernelversion prior to that
grains["osrelease"] = grains["kernelrelease"].split("-")[0]
grains.update(_bsd_cpudata(grains))
if grains["kernel"] in ("OpenBSD", "NetBSD"):
grains.update(_bsd_cpudata(grains))
grains["osrelease"] = grains["kernelrelease"].split("-")[0]
if grains["kernel"] == "NetBSD":
grains.update(_netbsd_gpu_data())
if not grains["os"]:
grains["os"] = "Unknown {0}".format(grains["kernel"])
grains["os_family"] = "Unknown"
else:
# this assigns family names based on the os name
# family defaults to the os name if not found
grains["os_family"] = _OS_FAMILY_MAP.get(grains["os"], grains["os"])
# Build the osarch grain. This grain will be used for platform-specific
# considerations such as package management. Fall back to the CPU
# architecture.
if grains.get("os_family") == "Debian":
osarch = __salt__["cmd.run"]("dpkg --print-architecture").strip()
elif grains.get("os_family") in ["RedHat", "Suse"]:
osarch = salt.utils.pkg.rpm.get_osarch()
elif grains.get("os_family") in ("NILinuxRT", "Poky"):
archinfo = {}
for line in __salt__["cmd.run"]("opkg print-architecture").splitlines():
if line.startswith("arch"):
_, arch, priority = line.split()
archinfo[arch.strip()] = int(priority.strip())
# Return osarch in priority order (higher to lower)
osarch = sorted(archinfo, key=archinfo.get, reverse=True)
else:
osarch = grains["cpuarch"]
grains["osarch"] = osarch
grains.update(_memdata(grains))
# Get the hardware and bios data
grains.update(_hw_data(grains))
# Load the virtual machine info
grains.update(_virtual(grains))
grains.update(_virtual_hv(grains))
grains.update(_ps(grains))
if grains.get("osrelease", ""):
osrelease_info = grains["osrelease"].split(".")
for idx, value in enumerate(osrelease_info):
if not value.isdigit():
continue
osrelease_info[idx] = int(value)
grains["osrelease_info"] = tuple(osrelease_info)
try:
grains["osmajorrelease"] = int(grains["osrelease_info"][0])
except (IndexError, TypeError, ValueError):
log.debug(
"Unable to derive osmajorrelease from osrelease_info '%s'. "
"The osmajorrelease grain will not be set.",
grains["osrelease_info"],
)
os_name = grains[
"os"
if grains.get("os")
in ("Debian", "FreeBSD", "OpenBSD", "NetBSD", "Mac", "Raspbian")
else "osfullname"
]
grains["osfinger"] = "{0}-{1}".format(
os_name,
grains["osrelease"]
if os_name in ("Ubuntu",)
else grains["osrelease_info"][0],
)
return grains
def locale_info():
"""
Provides
defaultlanguage
defaultencoding
"""
grains = {}
grains["locale_info"] = {}
if salt.utils.platform.is_proxy():
return grains
try:
(
grains["locale_info"]["defaultlanguage"],
grains["locale_info"]["defaultencoding"],
) = locale.getdefaultlocale()
except Exception: # pylint: disable=broad-except
        # locale.getdefaultlocale can raise ValueError!! Catch anything else
        # it might do, per #2205
grains["locale_info"]["defaultlanguage"] = "unknown"
grains["locale_info"]["defaultencoding"] = "unknown"
grains["locale_info"]["detectedencoding"] = __salt_system_encoding__
grains["locale_info"]["timezone"] = "unknown"
if _DATEUTIL_TZ:
try:
grains["locale_info"]["timezone"] = datetime.datetime.now(
dateutil.tz.tzlocal()
).tzname()
except UnicodeDecodeError:
            # Because the 'tzname' method is not part of salt, the decoding error can't be fixed here.
            # The error is in datetime in the Python 2 standard library.
if salt.utils.platform.is_windows():
grains["locale_info"]["timezone"] = time.tzname[0].decode("mbcs")
return grains
def hostname():
"""
Return fqdn, hostname, domainname
.. note::
On Windows the ``domain`` grain may refer to the dns entry for the host
instead of the Windows domain to which the host is joined. It may also
be empty if not a part of any domain. Refer to the ``windowsdomain``
grain instead
"""
# This is going to need some work
# Provides:
# fqdn
# host
# localhost
# domain
global __FQDN__
grains = {}
if salt.utils.platform.is_proxy():
return grains
grains["localhost"] = socket.gethostname()
if __FQDN__ is None:
__FQDN__ = salt.utils.network.get_fqhostname()
# On some distros (notably FreeBSD) if there is no hostname set
# salt.utils.network.get_fqhostname() will return None.
# In this case we punt and log a message at error level, but force the
# hostname and domain to be localhost.localdomain
# Otherwise we would stacktrace below
if __FQDN__ is None: # still!
log.error(
"Having trouble getting a hostname. Does this machine have its hostname and domain set properly?"
)
__FQDN__ = "localhost.localdomain"
grains["fqdn"] = __FQDN__
(grains["host"], grains["domain"]) = grains["fqdn"].partition(".")[::2]
return grains
def append_domain():
"""
Return append_domain if set
"""
grain = {}
if salt.utils.platform.is_proxy():
return grain
if "append_domain" in __opts__:
grain["append_domain"] = __opts__["append_domain"]
return grain
def fqdns():
"""
Return all known FQDNs for the system by enumerating all interfaces and
then trying to reverse resolve them (excluding 'lo' interface).
"""
# Provides:
# fqdns
grains = {}
fqdns = set()
addresses = salt.utils.network.ip_addrs(include_loopback=False, interface_data=_get_interfaces())
addresses.extend(salt.utils.network.ip_addrs6(include_loopback=False, interface_data=_get_interfaces()))
err_message = 'Exception during resolving address: %s'
for ip in addresses:
try:
name, aliaslist, addresslist = socket.gethostbyaddr(ip)
fqdns.update([socket.getfqdn(name)] + [als for als in aliaslist if salt.utils.network.is_fqdn(als)])
except socket.herror as err:
if err.errno in (0, HOST_NOT_FOUND, NO_DATA):
                # No FQDN for this IP address; this is common, so a debug message is enough.
log.debug("Unable to resolve address %s: %s", ip, err)
else:
log.error(err_message, ip, err)
except (socket.error, socket.gaierror, socket.timeout) as err:
log.error(err_message, ip, err)
return {"fqdns": sorted(list(fqdns))}
def ip_fqdn():
"""
Return ip address and FQDN grains
"""
if salt.utils.platform.is_proxy():
return {}
ret = {}
ret["ipv4"] = salt.utils.network.ip_addrs(include_loopback=True)
ret["ipv6"] = salt.utils.network.ip_addrs6(include_loopback=True)
_fqdn = hostname()["fqdn"]
for socket_type, ipv_num in ((socket.AF_INET, "4"), (socket.AF_INET6, "6")):
key = "fqdn_ip" + ipv_num
if not ret["ipv" + ipv_num]:
ret[key] = []
else:
try:
start_time = datetime.datetime.utcnow()
info = socket.getaddrinfo(_fqdn, None, socket_type)
ret[key] = list(set(item[4][0] for item in info))
except (socket.error, UnicodeError):
timediff = datetime.datetime.utcnow() - start_time
if timediff.seconds > 5 and __opts__["__role"] == "master":
log.warning(
'Unable to find IPv%s record for "%s" causing a %s '
"second timeout when rendering grains. Set the dns or "
"/etc/hosts for IPv%s to clear this.",
ipv_num,
_fqdn,
timediff,
ipv_num,
)
ret[key] = []
return ret
def ip_interfaces():
"""
Provide a dict of the connected interfaces and their ip addresses
The addresses will be passed as a list for each interface
"""
# Provides:
# ip_interfaces
if salt.utils.platform.is_proxy():
return {}
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
iface_ips = []
for inet in ifaces[face].get("inet", []):
if "address" in inet:
iface_ips.append(inet["address"])
for inet in ifaces[face].get("inet6", []):
if "address" in inet:
iface_ips.append(inet["address"])
for secondary in ifaces[face].get("secondary", []):
if "address" in secondary:
iface_ips.append(secondary["address"])
ret[face] = iface_ips
return {"ip_interfaces": ret}
def ip4_interfaces():
"""
Provide a dict of the connected interfaces and their ip4 addresses
The addresses will be passed as a list for each interface
"""
# Provides:
# ip_interfaces
if salt.utils.platform.is_proxy():
return {}
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
iface_ips = []
for inet in ifaces[face].get("inet", []):
if "address" in inet:
iface_ips.append(inet["address"])
for secondary in ifaces[face].get("secondary", []):
if "address" in secondary:
iface_ips.append(secondary["address"])
ret[face] = iface_ips
return {"ip4_interfaces": ret}
def ip6_interfaces():
"""
Provide a dict of the connected interfaces and their ip6 addresses
The addresses will be passed as a list for each interface
"""
# Provides:
# ip_interfaces
if salt.utils.platform.is_proxy():
return {}
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
iface_ips = []
for inet in ifaces[face].get("inet6", []):
if "address" in inet:
iface_ips.append(inet["address"])
for secondary in ifaces[face].get("secondary", []):
if "address" in secondary:
iface_ips.append(secondary["address"])
ret[face] = iface_ips
return {"ip6_interfaces": ret}
def hwaddr_interfaces():
"""
Provide a dict of the connected interfaces and their
hw addresses (Mac Address)
"""
# Provides:
# hwaddr_interfaces
ret = {}
ifaces = _get_interfaces()
for face in ifaces:
if "hwaddr" in ifaces[face]:
ret[face] = ifaces[face]["hwaddr"]
return {"hwaddr_interfaces": ret}
def dns():
"""
Parse the resolver configuration file
.. versionadded:: 2016.3.0
"""
# Provides:
# dns
if salt.utils.platform.is_windows() or "proxyminion" in __opts__:
return {}
resolv = salt.utils.dns.parse_resolv()
for key in ("nameservers", "ip4_nameservers", "ip6_nameservers", "sortlist"):
if key in resolv:
resolv[key] = [six.text_type(i) for i in resolv[key]]
return {"dns": resolv} if resolv else {}
def get_machine_id():
"""
Provide the machine-id for machine/virtualization combination
"""
# Provides:
# machine-id
if platform.system() == "AIX":
return _aix_get_machine_id()
locations = ["/etc/machine-id", "/var/lib/dbus/machine-id"]
existing_locations = [loc for loc in locations if os.path.exists(loc)]
if not existing_locations:
return {}
else:
with salt.utils.files.fopen(existing_locations[0]) as machineid:
return {"machine_id": machineid.read().strip()}
def cwd():
"""
Current working directory
"""
return {"cwd": os.getcwd()}
def path():
"""
Return the path
"""
# Provides:
# path
# systempath
_path = salt.utils.stringutils.to_unicode(os.environ.get("PATH", "").strip())
return {
"path": _path,
"systempath": _path.split(os.path.pathsep),
}
def pythonversion():
"""
Return the Python version
"""
# Provides:
# pythonversion
return {"pythonversion": list(sys.version_info)}
def pythonpath():
"""
Return the Python path
"""
# Provides:
# pythonpath
return {"pythonpath": sys.path}
def pythonexecutable():
"""
Return the python executable in use
"""
# Provides:
# pythonexecutable
return {"pythonexecutable": sys.executable}
def saltpath():
"""
Return the path of the salt module
"""
# Provides:
# saltpath
salt_path = os.path.abspath(os.path.join(__file__, os.path.pardir))
return {"saltpath": os.path.dirname(salt_path)}
def saltversion():
"""
Return the version of salt
"""
# Provides:
# saltversion
from salt.version import __version__
return {"saltversion": __version__}
def zmqversion():
"""
Return the zeromq version
"""
# Provides:
# zmqversion
try:
import zmq
return {"zmqversion": zmq.zmq_version()} # pylint: disable=no-member
except ImportError:
return {}
def saltversioninfo():
"""
Return the version_info of salt
.. versionadded:: 0.17.0
"""
# Provides:
# saltversioninfo
from salt.version import __version_info__
return {"saltversioninfo": list(__version_info__)}
def _hw_data(osdata):
"""
Get system specific hardware data from dmidecode
Provides
biosversion
productname
manufacturer
serialnumber
biosreleasedate
uuid
.. versionadded:: 0.9.5
"""
if salt.utils.platform.is_proxy():
return {}
grains = {}
if osdata["kernel"] == "Linux" and os.path.exists("/sys/class/dmi/id"):
# On many Linux distributions basic firmware information is available via sysfs
# requires CONFIG_DMIID to be enabled in the Linux kernel configuration
sysfs_firmware_info = {
"biosversion": "bios_version",
"productname": "product_name",
"manufacturer": "sys_vendor",
"biosreleasedate": "bios_date",
"uuid": "product_uuid",
"serialnumber": "product_serial",
}
for key, fw_file in sysfs_firmware_info.items():
contents_file = os.path.join("/sys/class/dmi/id", fw_file)
if os.path.exists(contents_file):
try:
with salt.utils.files.fopen(contents_file, "r") as ifile:
grains[key] = salt.utils.stringutils.to_unicode(
ifile.read().strip(), errors="replace"
)
if key == "uuid":
grains["uuid"] = grains["uuid"].lower()
except UnicodeDecodeError:
# Some firmwares provide non-valid 'product_name'
# files, ignore them
log.debug(
"The content in /sys/devices/virtual/dmi/id/product_name is not valid"
)
except (IOError, OSError) as err:
                    # PermissionError is new to Python 3, but corresponds to the EACCES and
# EPERM error numbers. Use those instead here for PY2 compatibility.
if err.errno == EACCES or err.errno == EPERM:
# Skip the grain if non-root user has no access to the file.
pass
elif salt.utils.path.which_bin(["dmidecode", "smbios"]) is not None and not (
salt.utils.platform.is_smartos()
or ( # SunOS on SPARC - 'smbios: failed to load SMBIOS: System does not export an SMBIOS table'
osdata["kernel"] == "SunOS" and osdata["cpuarch"].startswith("sparc")
)
):
# On SmartOS (possibly SunOS also) smbios only works in the global zone
# smbios is also not compatible with linux's smbios (smbios -s = print summarized)
grains = {
"biosversion": __salt__["smbios.get"]("bios-version"),
"productname": __salt__["smbios.get"]("system-product-name"),
"manufacturer": __salt__["smbios.get"]("system-manufacturer"),
"biosreleasedate": __salt__["smbios.get"]("bios-release-date"),
"uuid": __salt__["smbios.get"]("system-uuid"),
}
grains = dict([(key, val) for key, val in grains.items() if val is not None])
uuid = __salt__["smbios.get"]("system-uuid")
if uuid is not None:
grains["uuid"] = uuid.lower()
for serial in (
"system-serial-number",
"chassis-serial-number",
"baseboard-serial-number",
):
serial = __salt__["smbios.get"](serial)
if serial is not None:
grains["serialnumber"] = serial
break
elif salt.utils.path.which_bin(["fw_printenv"]) is not None:
        # ARM Linux devices expose U-Boot environment variables via fw_printenv
hwdata = {
"manufacturer": "manufacturer",
"serialnumber": "serial#",
"productname": "DeviceDesc",
}
for grain_name, cmd_key in six.iteritems(hwdata):
result = __salt__["cmd.run_all"]("fw_printenv {0}".format(cmd_key))
if result["retcode"] == 0:
uboot_keyval = result["stdout"].split("=")
grains[grain_name] = _clean_value(grain_name, uboot_keyval[1])
elif osdata["kernel"] == "FreeBSD":
# On FreeBSD /bin/kenv (already in base system)
# can be used instead of dmidecode
kenv = salt.utils.path.which("kenv")
if kenv:
# In theory, it will be easier to add new fields to this later
fbsd_hwdata = {
"biosversion": "smbios.bios.version",
"manufacturer": "smbios.system.maker",
"serialnumber": "smbios.system.serial",
"productname": "smbios.system.product",
"biosreleasedate": "smbios.bios.reldate",
"uuid": "smbios.system.uuid",
}
for key, val in six.iteritems(fbsd_hwdata):
value = __salt__["cmd.run"]("{0} {1}".format(kenv, val))
grains[key] = _clean_value(key, value)
elif osdata["kernel"] == "OpenBSD":
sysctl = salt.utils.path.which("sysctl")
hwdata = {
"biosversion": "hw.version",
"manufacturer": "hw.vendor",
"productname": "hw.product",
"serialnumber": "hw.serialno",
"uuid": "hw.uuid",
}
for key, oid in six.iteritems(hwdata):
value = __salt__["cmd.run"]("{0} -n {1}".format(sysctl, oid))
if not value.endswith(" value is not available"):
grains[key] = _clean_value(key, value)
elif osdata["kernel"] == "NetBSD":
sysctl = salt.utils.path.which("sysctl")
nbsd_hwdata = {
"biosversion": "machdep.dmi.board-version",
"manufacturer": "machdep.dmi.system-vendor",
"serialnumber": "machdep.dmi.system-serial",
"productname": "machdep.dmi.system-product",
"biosreleasedate": "machdep.dmi.bios-date",
"uuid": "machdep.dmi.system-uuid",
}
for key, oid in six.iteritems(nbsd_hwdata):
result = __salt__["cmd.run_all"]("{0} -n {1}".format(sysctl, oid))
if result["retcode"] == 0:
grains[key] = _clean_value(key, result["stdout"])
elif osdata["kernel"] == "Darwin":
grains["manufacturer"] = "Apple Inc."
sysctl = salt.utils.path.which("sysctl")
hwdata = {"productname": "hw.model"}
for key, oid in hwdata.items():
value = __salt__["cmd.run"]("{0} -b {1}".format(sysctl, oid))
if not value.endswith(" is invalid"):
grains[key] = _clean_value(key, value)
elif osdata["kernel"] == "SunOS" and osdata["cpuarch"].startswith("sparc"):
# Depending on the hardware model, commands can report different bits
# of information. With that said, consolidate the output from various
# commands and attempt various lookups.
data = ""
for (cmd, args) in (
("/usr/sbin/prtdiag", "-v"),
("/usr/sbin/prtconf", "-vp"),
("/usr/sbin/virtinfo", "-a"),
):
if salt.utils.path.which(cmd): # Also verifies that cmd is executable
data += __salt__["cmd.run"]("{0} {1}".format(cmd, args))
data += "\n"
sn_regexes = [
re.compile(r)
for r in [
r"(?im)^\s*Chassis\s+Serial\s+Number\n-+\n(\S+)", # prtdiag
r"(?im)^\s*chassis-sn:\s*(\S+)", # prtconf
r"(?im)^\s*Chassis\s+Serial#:\s*(\S+)", # virtinfo
]
]
obp_regexes = [
re.compile(r)
for r in [
r"(?im)^\s*System\s+PROM\s+revisions.*\nVersion\n-+\nOBP\s+(\S+)\s+(\S+)", # prtdiag
r"(?im)^\s*version:\s*\'OBP\s+(\S+)\s+(\S+)", # prtconf
]
]
fw_regexes = [
re.compile(r)
for r in [r"(?im)^\s*Sun\s+System\s+Firmware\s+(\S+)\s+(\S+)"] # prtdiag
]
uuid_regexes = [
re.compile(r) for r in [r"(?im)^\s*Domain\s+UUID:\s*(\S+)"] # virtinfo
]
manufacture_regexes = [
re.compile(r)
for r in [r"(?im)^\s*System\s+Configuration:\s*(.*)(?=sun)"] # prtdiag
]
product_regexes = [
re.compile(r)
for r in [
r"(?im)^\s*System\s+Configuration:\s*.*?sun\d\S+[^\S\r\n]*(.*)", # prtdiag
r"(?im)^[^\S\r\n]*banner-name:[^\S\r\n]*(.*)", # prtconf
r"(?im)^[^\S\r\n]*product-name:[^\S\r\n]*(.*)", # prtconf
]
]
sn_regexes = [
re.compile(r)
for r in [
r"(?im)Chassis\s+Serial\s+Number\n-+\n(\S+)", # prtdiag
r"(?i)Chassis\s+Serial#:\s*(\S+)", # virtinfo
r"(?i)chassis-sn:\s*(\S+)", # prtconf
]
]
obp_regexes = [
re.compile(r)
for r in [
r"(?im)System\s+PROM\s+revisions.*\nVersion\n-+\nOBP\s+(\S+)\s+(\S+)", # prtdiag
r"(?im)version:\s*\'OBP\s+(\S+)\s+(\S+)", # prtconf
]
]
fw_regexes = [
re.compile(r)
for r in [r"(?i)Sun\s+System\s+Firmware\s+(\S+)\s+(\S+)"] # prtdiag
]
uuid_regexes = [
re.compile(r) for r in [r"(?i)Domain\s+UUID:\s+(\S+)"] # virtinfo
]
for regex in sn_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains["serialnumber"] = res.group(1).strip().replace("'", "")
break
for regex in obp_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
obp_rev, obp_date = res.groups()[
0:2
] # Limit the number in case we found the data in multiple places
grains["biosversion"] = obp_rev.strip().replace("'", "")
grains["biosreleasedate"] = obp_date.strip().replace("'", "")
for regex in fw_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
fw_rev, fw_date = res.groups()[0:2]
grains["systemfirmware"] = fw_rev.strip().replace("'", "")
grains["systemfirmwaredate"] = fw_date.strip().replace("'", "")
break
for regex in uuid_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains["uuid"] = res.group(1).strip().replace("'", "")
break
for regex in manufacture_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains["manufacture"] = res.group(1).strip().replace("'", "")
break
for regex in product_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
t_productname = res.group(1).strip().replace("'", "")
if t_productname:
grains["product"] = t_productname
grains["productname"] = t_productname
break
elif osdata["kernel"] == "AIX":
cmd = salt.utils.path.which("prtconf")
if cmd:
data = __salt__["cmd.run"]("{0}".format(cmd)) + os.linesep
for dest, regstring in (
("serialnumber", r"(?im)^\s*Machine\s+Serial\s+Number:\s+(\S+)"),
("systemfirmware", r"(?im)^\s*Firmware\s+Version:\s+(.*)"),
):
for regex in [re.compile(r) for r in [regstring]]:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains[dest] = res.group(1).strip().replace("'", "")
product_regexes = [re.compile(r"(?im)^\s*System\s+Model:\s+(\S+)")]
for regex in product_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains["manufacturer"], grains["productname"] = (
res.group(1).strip().replace("'", "").split(",")
)
break
else:
log.error("The 'prtconf' binary was not found in $PATH.")
return grains
def _get_hash_by_shell():
'''
    Shell out to Python 3 to compute a reliable hash
:return:
'''
id_ = __opts__.get('id', '')
id_hash = None
py_ver = sys.version_info[:2]
if py_ver >= (3, 3):
# Python 3.3 enabled hash randomization, so we need to shell out to get
# a reliable hash.
id_hash = __salt__['cmd.run']([sys.executable, '-c', 'print(hash("{0}"))'.format(id_)],
env={'PYTHONHASHSEED': '0'})
try:
id_hash = int(id_hash)
except (TypeError, ValueError):
log.debug('Failed to hash the ID to get the server_id grain. Result of hash command: %s', id_hash)
id_hash = None
if id_hash is None:
# Python < 3.3 or error encountered above
id_hash = hash(id_)
return abs(id_hash % (2 ** 31))
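# Illustration of why the subprocess above pins PYTHONHASHSEED (assumed values,
# not real output): with hash randomization enabled, hash("myminion") differs
# between interpreter runs, so the grain would change on every restart. Running
#   PYTHONHASHSEED=0 python -c 'print(hash("myminion"))'
# in a subprocess returns the same integer every time, which _get_hash_by_shell
# then reduces modulo 2**31.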
def get_server_id():
'''
Provides an integer based on the FQDN of a machine.
Useful as server-id in MySQL replication or anywhere else you'll need an ID
like this.
'''
# Provides:
# server_id
if salt.utils.platform.is_proxy():
server_id = {}
else:
use_crc = __opts__.get('server_id_use_crc')
if bool(use_crc):
id_hash = getattr(zlib, use_crc, zlib.adler32)(__opts__.get('id', '').encode()) & 0xffffffff
else:
            log.debug('This server_id is computed neither by Adler32 nor by CRC32. '
                      'Please use the "server_id_use_crc" option and define the algorithm you '
                      'prefer (default "Adler32"). Starting with Sodium, the '
                      'server_id will be computed with Adler32 by default.')
id_hash = _get_hash_by_shell()
server_id = {'server_id': id_hash}
return server_id
def get_master():
"""
Provides the minion with the name of its master.
This is useful in states to target other services running on the master.
"""
# Provides:
# master
return {"master": __opts__.get("master", "")}
def default_gateway():
"""
Populates grains which describe whether a server has a default gateway
configured or not. Uses `ip -4 route show` and `ip -6 route show` and greps
for a `default` at the beginning of any line. Assuming the standard
`default via <ip>` format for default gateways, it will also parse out the
ip address of the default gateway, and put it in ip4_gw or ip6_gw.
If the `ip` command is unavailable, no grains will be populated.
Currently does not support multiple default gateways. The grains will be
set to the first default gateway found.
List of grains:
ip4_gw: True # ip/True/False if default ipv4 gateway
ip6_gw: True # ip/True/False if default ipv6 gateway
ip_gw: True # True if either of the above is True, False otherwise
"""
grains = {}
ip_bin = salt.utils.path.which("ip")
if not ip_bin:
return {}
grains["ip_gw"] = False
grains["ip4_gw"] = False
grains["ip6_gw"] = False
for ip_version in ("4", "6"):
try:
out = __salt__["cmd.run"]([ip_bin, "-" + ip_version, "route", "show"])
for line in out.splitlines():
if line.startswith("default"):
grains["ip_gw"] = True
grains["ip{0}_gw".format(ip_version)] = True
try:
via, gw_ip = line.split()[1:3]
except ValueError:
pass
else:
if via == "via":
grains["ip{0}_gw".format(ip_version)] = gw_ip
break
except Exception: # pylint: disable=broad-except
continue
return grains
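# Example of the route parsing in default_gateway (hypothetical output of
# "ip -4 route show"):
#   default via 192.0.2.1 dev eth0 proto dhcp metric 100
# sets ip_gw to True and ip4_gw to "192.0.2.1"; a "default dev tun0" line with
# no "via" leaves ip4_gw as True because no gateway address can be parsed.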
def kernelparams():
'''
Return the kernel boot parameters
'''
if salt.utils.platform.is_windows():
# TODO: add grains using `bcdedit /enum {current}`
return {}
else:
try:
with salt.utils.files.fopen('/proc/cmdline', 'r') as fhr:
cmdline = fhr.read()
grains = {'kernelparams': []}
for data in [item.split('=') for item in salt.utils.args.shlex_split(cmdline)]:
value = None
if len(data) == 2:
value = data[1].strip('"')
grains['kernelparams'] += [(data[0], value)]
except IOError as exc:
grains = {}
log.debug('Failed to read /proc/cmdline: %s', exc)
return grains
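# Example of the /proc/cmdline parsing above (hypothetical contents):
#   BOOT_IMAGE=/vmlinuz-5.4.0 root=/dev/sda1 ro quiet
# would produce:
#   {'kernelparams': [('BOOT_IMAGE', '/vmlinuz-5.4.0'), ('root', '/dev/sda1'),
#                     ('ro', None), ('quiet', None)]}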
|
py | b40e1192fb2d6492eada3dda3bcdbd1e5960df46 | """
SciPy: A scientific computing package for Python
================================================
Documentation is available in the docstrings and
online at https://docs.scipy.org.
Contents
--------
SciPy imports all the functions from the NumPy namespace, and in
addition provides:
Subpackages
-----------
Using any of these subpackages requires an explicit import. For example,
``import scipy.cluster``.
::
cluster --- Vector Quantization / Kmeans
fft --- Discrete Fourier transforms
fftpack --- Legacy discrete Fourier transforms
integrate --- Integration routines
interpolate --- Interpolation Tools
io --- Data input and output
linalg --- Linear algebra routines
linalg.blas --- Wrappers to BLAS library
linalg.lapack --- Wrappers to LAPACK library
misc --- Various utilities that don't have
another home.
ndimage --- N-D image package
odr --- Orthogonal Distance Regression
optimize --- Optimization Tools
signal --- Signal Processing Tools
signal.windows --- Window functions
sparse --- Sparse Matrices
sparse.linalg --- Sparse Linear Algebra
sparse.linalg.dsolve --- Linear Solvers
sparse.linalg.dsolve.umfpack --- :Interface to the UMFPACK library:
sparse.linalg.eigen --- Sparse Eigenvalue Solvers
sparse.linalg.eigen.lobpcg --- Locally Optimal Block Preconditioned
Conjugate Gradient Method (LOBPCG)
spatial --- Spatial data structures and algorithms
special --- Special functions
stats --- Statistical Functions
Utility tools
-------------
::
test --- Run scipy unittests
show_config --- Show scipy build configuration
show_numpy_config --- Show numpy build configuration
__version__ --- SciPy version string
__numpy_version__ --- Numpy version string
"""
__all__ = ['test']
from numpy import show_config as show_numpy_config
if show_numpy_config is None:
raise ImportError(
"Cannot import SciPy when running from NumPy source directory.")
from numpy import __version__ as __numpy_version__
# Import numpy symbols to scipy name space (DEPRECATED)
from ._lib.deprecation import _deprecated
import numpy as _num
linalg = None
_msg = ('scipy.{0} is deprecated and will be removed in SciPy 2.0.0, '
'use numpy.{0} instead')
# deprecate callable objects, skipping classes
for _key in _num.__all__:
_fun = getattr(_num, _key)
if callable(_fun) and not isinstance(_fun, type):
_fun = _deprecated(_msg.format(_key))(_fun)
globals()[_key] = _fun
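# Effect of the wrapping above (illustrative): after import, calling a wrapped
# name such as scipy.sqrt(4.0) still returns 2.0 but emits a DeprecationWarning
# pointing the caller to numpy.sqrt instead, as described by _msg.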
from numpy.random import rand, randn
_msg = ('scipy.{0} is deprecated and will be removed in SciPy 2.0.0, '
'use numpy.random.{0} instead')
rand = _deprecated(_msg.format('rand'))(rand)
randn = _deprecated(_msg.format('randn'))(randn)
# fft is especially problematic, so was removed in SciPy 1.6.0
from numpy.fft import ifft
ifft = _deprecated('scipy.ifft is deprecated and will be removed in SciPy '
'2.0.0, use scipy.fft.ifft instead')(ifft)
import numpy.lib.scimath as _sci
_msg = ('scipy.{0} is deprecated and will be removed in SciPy 2.0.0, '
'use numpy.lib.scimath.{0} instead')
for _key in _sci.__all__:
_fun = getattr(_sci, _key)
if callable(_fun):
_fun = _deprecated(_msg.format(_key))(_fun)
globals()[_key] = _fun
__all__ += _num.__all__
__all__ += ['randn', 'rand', 'ifft']
del _num
# Remove the linalg imported from NumPy so that the scipy.linalg package can be
# imported.
del linalg
__all__.remove('linalg')
# We first need to detect if we're being called as part of the SciPy
# setup procedure itself in a reliable manner.
try:
__SCIPY_SETUP__
except NameError:
__SCIPY_SETUP__ = False
if __SCIPY_SETUP__:
import sys as _sys
_sys.stderr.write('Running from SciPy source directory.\n')
del _sys
else:
try:
from scipy.__config__ import show as show_config
except ImportError as e:
msg = """Error importing SciPy: you cannot import SciPy while
being in scipy source directory; please exit the SciPy source
tree first and relaunch your Python interpreter."""
raise ImportError(msg) from e
from scipy.version import version as __version__
# Allow distributors to run custom init code
from . import _distributor_init
from scipy._lib import _pep440
if _pep440.parse(__numpy_version__) < _pep440.Version('1.16.5'):
import warnings
warnings.warn("NumPy 1.16.5 or above is required for this version of "
"SciPy (detected version %s)" % __numpy_version__,
UserWarning)
del _pep440
from scipy._lib._ccallback import LowLevelCallable
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
# This makes "from scipy import fft" return scipy.fft, not np.fft
del fft
|
py | b40e12f46ab352113bc27afadbd7c899f9cafe28 | import os
from flask import Flask, g
from flask_cors import CORS
app = Flask(__name__)
cors = CORS(app)
# Configurations
try:
env = os.environ['APPLICATION_ENV']
except KeyError as e:
# logging.error('Unknown environment key, defaulting to Development')
env = 'Development'
app.config.from_object('config.%s' % env)
app.config.update(
DEBUG=True,
TESTING=True,
TEMPLATES_AUTO_RELOAD=True
)
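# Note on the config loading order above: app.config.from_object() loads the
# object named by APPLICATION_ENV (e.g. config.Development, an assumed module
# layout), and the explicit update() afterwards overrides those keys, so DEBUG,
# TESTING and TEMPLATES_AUTO_RELOAD are always forced on here.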
@app.errorhandler(404)
def not_found(error):
return "Not found", 404
@app.before_request
def before_request():
g.botId = None
from app.core.controllers import core as core
from app.stories.controllers import stories as stories
from app.train.controllers import train as train
from app.endpoint.controllers import endpoint as endpoint
from app.chat.controllers import chat as chat
app.register_blueprint(core)
app.register_blueprint(stories)
app.register_blueprint(train)
app.register_blueprint(endpoint)
app.register_blueprint(chat)
|
py | b40e12f8e38ffc1578b78a4c00d82f47ee80894a | """
Unit test framework
"""
import unittest
# include all the TestCase imports here
if __name__ == "__main__":
unittest.main() |
py | b40e1404d82e3f8013ac43c843daba98c2dd74f9 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
import shutil
import requests
import tqdm
import hashlib
import tarfile
import zipfile
from .voc_utils import merge_and_create_list
import logging
logger = logging.getLogger(__name__)
__all__ = ['get_weights_path', 'get_dataset_path']
WEIGHTS_HOME = osp.expanduser("~/.cache/paddle/weights")
DATASET_HOME = osp.expanduser("~/.cache/paddle/dataset")
# dict of {dataset_name: (download_info, sub_dirs)}
# download info: (url, md5sum)
DATASETS = {
'coco': ([
(
'http://images.cocodataset.org/zips/train2017.zip',
'cced6f7f71b7629ddf16f17bbcfab6b2', ),
(
'http://images.cocodataset.org/zips/val2017.zip',
'442b8da7639aecaf257c1dceb8ba8c80', ),
(
'http://images.cocodataset.org/annotations/annotations_trainval2017.zip',
'f4bbac642086de4f52a3fdda2de5fa2c', ),
], ["annotations", "train2017", "val2017"]),
'voc': ([
(
'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar',
'6cd6e144f989b92b3379bac3b3de84fd', ),
(
'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar',
'c52e279531787c972589f7e41ab4ae64', ),
(
'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar',
'b6e924de25625d8de591ea690078ad9f', ),
], ["VOCdevkit/VOC_all"]),
}
DOWNLOAD_RETRY_LIMIT = 3
def get_weights_path(url):
"""Get weights path from WEIGHT_HOME, if not exists,
download it from url.
"""
return get_path(url, WEIGHTS_HOME)
def get_dataset_path(path, annotation, image_dir):
"""
    If path exists, return it.
    Otherwise, look for the dataset under DATASET_HOME and download it
    if it is not already there.
"""
if _dataset_exists(path, annotation, image_dir):
return path
logger.info("Dataset {} is not valid for reason above, try searching {} or "
"downloading dataset...".format(
osp.realpath(path), DATASET_HOME))
for name, dataset in DATASETS.items():
if os.path.split(path.strip().lower())[-1] == name:
logger.info("Parse dataset_dir {} as dataset "
"{}".format(path, name))
data_dir = osp.join(DATASET_HOME, name)
# For voc, only check merged dir VOC_all
if name == 'voc':
check_dir = osp.join(data_dir, dataset[1][0])
if osp.exists(check_dir):
logger.info("Found {}".format(check_dir))
return data_dir
for url, md5sum in dataset[0]:
get_path(url, data_dir, md5sum)
# voc should merge dir and create list after download
if name == 'voc':
logger.info("Download voc dataset successed, merge "
"VOC2007 and VOC2012 to VOC_all...")
output_dir = osp.join(data_dir, dataset[1][0])
devkit_dir = "/".join(output_dir.split('/')[:-1])
years = ['2007', '2012']
                # merge into output_tmp_dir first, then move the result to
                # output_dir once the merge has succeeded.
output_tmp_dir = osp.join(data_dir, 'tmp')
if osp.isdir(output_tmp_dir):
shutil.rmtree(output_tmp_dir)
                # NOTE(dengkaipeng): since the VOC dataset is downloaded
                # automatically, the default VOC label list should be used;
                # do not generate label_list.txt here. For the default
                # labels, see ../data/source/voc_loader.py
merge_and_create_list(devkit_dir, years, output_tmp_dir)
shutil.move(output_tmp_dir, output_dir)
# remove source directory VOC2007 and VOC2012
shutil.rmtree(osp.join(devkit_dir, "VOC2007"))
shutil.rmtree(osp.join(devkit_dir, "VOC2012"))
return data_dir
# not match any dataset in DATASETS
raise ValueError("Dataset {} is not valid and cannot parse dataset type "
"'{}' for automaticly downloading, which only supports "
"'voc' and 'coco' currently".format(path, osp.split(path)[-1]))
def map_path(url, root_dir):
# parse path after download to decompress under root_dir
fname = url.split('/')[-1]
zip_formats = ['.zip', '.tar', '.gz']
fpath = fname
for zip_format in zip_formats:
fpath = fpath.replace(zip_format, '')
return osp.join(root_dir, fpath)
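# Example of the mapping above (hypothetical values): with
# root_dir = DATASET_HOME + "/coco" and
# url = "http://images.cocodataset.org/zips/train2017.zip", the ".zip" suffix
# is stripped and the function returns "<root_dir>/train2017", which is where
# the archive is expected to be decompressed.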
def get_path(url, root_dir, md5sum=None):
""" Download from given url to root_dir.
    If the file or directory specified by url exists under
    root_dir, return the path directly; otherwise download it
    from url, decompress it, and return the path.
url (str): download url
root_dir (str): root dir for downloading, it should be
WEIGHTS_HOME or DATASET_HOME
md5sum (str): md5 sum of download package
"""
# parse path after download to decompress under root_dir
fullpath = map_path(url, root_dir)
    # For some archives the decompressed directory name differs
    # from the archive file name; rename it using the following map
decompress_name_map = {
"VOC": "VOCdevkit/VOC_all",
"annotations_trainval": "annotations"
}
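    # Example (hypothetical): a VOC url such as ".../VOCtrainval_11-May-2012.tar"
    # maps to a fullpath containing "VOC", so its last path component is
    # replaced with "VOCdevkit/VOC_all"; the COCO annotations archive is
    # similarly renamed to just "annotations".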
for k, v in decompress_name_map.items():
if fullpath.find(k) >= 0:
fullpath = '/'.join(fullpath.split('/')[:-1] + [v])
if osp.exists(fullpath):
logger.info("Found {}".format(fullpath))
else:
fullname = _download(url, root_dir, md5sum)
_decompress(fullname)
return fullpath
def _dataset_exists(path, annotation, image_dir):
"""
Check if user define dataset exists
"""
if not osp.exists(path):
logger.info("Config dataset_dir {} is not exits, "
"dataset config is not valid".format(path))
return False
if annotation:
annotation_path = osp.join(path, annotation)
if not osp.isfile(annotation_path):
logger.info("Config annotation {} is not a "
"file, dataset config is not "
"valid".format(annotation_path))
return False
if image_dir:
image_path = osp.join(path, image_dir)
if not osp.isdir(image_path):
logger.info("Config image_dir {} is not a "
"directory, dataset config is not "
"valid".format(image_path))
return False
return True
def _download(url, path, md5sum=None):
"""
Download from url, save to path.
url (str): download url
path (str): download to given path
"""
if not osp.exists(path):
os.makedirs(path)
fname = url.split('/')[-1]
fullname = osp.join(path, fname)
retry_cnt = 0
while not (osp.exists(fullname) and _md5check(fullname, md5sum)):
if retry_cnt < DOWNLOAD_RETRY_LIMIT:
retry_cnt += 1
else:
raise RuntimeError("Download from {} failed. "
"Retry limit reached".format(url))
logger.info("Downloading {} from {}".format(fname, url))
req = requests.get(url, stream=True)
if req.status_code != 200:
raise RuntimeError("Downloading from {} failed with code "
"{}!".format(url, req.status_code))
        # To protect against an interrupted download, download to
        # tmp_fullname first, then move tmp_fullname to fullname
        # once the download has finished
tmp_fullname = fullname + "_tmp"
total_size = req.headers.get('content-length')
with open(tmp_fullname, 'wb') as f:
if total_size:
for chunk in tqdm.tqdm(
req.iter_content(chunk_size=1024),
total=(int(total_size) + 1023) // 1024,
unit='KB'):
f.write(chunk)
else:
for chunk in req.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
shutil.move(tmp_fullname, fullname)
return fullname
def _md5check(fullname, md5sum=None):
if md5sum is None:
return True
logger.info("File {} md5 checking...".format(fullname))
md5 = hashlib.md5()
with open(fullname, 'rb') as f:
for chunk in iter(lambda: f.read(4096), b""):
md5.update(chunk)
calc_md5sum = md5.hexdigest()
if calc_md5sum != md5sum:
logger.info("File {} md5 check failed, {}(calc) != "
"{}(base)".format(fullname, calc_md5sum, md5sum))
return False
return True
def _decompress(fname):
"""
Decompress for zip and tar file
"""
logger.info("Decompressing {}...".format(fname))
    # To protect against interrupted decompression, decompress into the
    # fpath_tmp directory first; if decompression succeeds, move the
    # decompressed files to fpath, delete fpath_tmp, and remove the
    # downloaded archive.
fpath = '/'.join(fname.split('/')[:-1])
fpath_tmp = osp.join(fpath, 'tmp')
if osp.isdir(fpath_tmp):
shutil.rmtree(fpath_tmp)
os.makedirs(fpath_tmp)
if fname.find('tar') >= 0:
with tarfile.open(fname) as tf:
tf.extractall(path=fpath_tmp)
elif fname.find('zip') >= 0:
with zipfile.ZipFile(fname) as zf:
zf.extractall(path=fpath_tmp)
else:
raise TypeError("Unsupport compress file type {}".format(fname))
for f in os.listdir(fpath_tmp):
src_dir = osp.join(fpath_tmp, f)
dst_dir = osp.join(fpath, f)
_move_and_merge_tree(src_dir, dst_dir)
shutil.rmtree(fpath_tmp)
os.remove(fname)
def _move_and_merge_tree(src, dst):
"""
Move src directory to dst, if dst is already exists,
merge src to dst
"""
if not osp.exists(dst):
shutil.move(src, dst)
else:
for fp in os.listdir(src):
src_fp = osp.join(src, fp)
dst_fp = osp.join(dst, fp)
if osp.isdir(src_fp):
if osp.isdir(dst_fp):
_move_and_merge_tree(src_fp, dst_fp)
else:
shutil.move(src_fp, dst_fp)
elif osp.isfile(src_fp) and \
not osp.isfile(dst_fp):
shutil.move(src_fp, dst_fp)
|
py | b40e140a7c6d9eb78c4a510a06eec862ddf914e2 | from nengo.builder import Builder, Signal
from nengo.builder.operator import Reset, SimPyFunc
from nengo.exceptions import BuildError
from nengo.node import Node
from nengo.processes import Process
from nengo.rc import rc
from nengo.utils.numpy import is_array_like
@Builder.register(Node)
def build_node(model, node):
"""Builds a `.Node` object into a model.
The node build function is relatively simple. It involves creating input
and output signals, and connecting them with an `.Operator` that depends
on the type of ``node.output``.
Parameters
----------
model : Model
The model to build into.
node : Node
The node to build.
Notes
-----
Sets ``model.params[node]`` to ``None``.
"""
# input signal
if not is_array_like(node.output) and node.size_in > 0:
sig_in = Signal(shape=node.size_in, name="%s.in" % node)
model.add_op(Reset(sig_in))
else:
sig_in = None
# Provide output
if node.output is None:
sig_out = sig_in
elif isinstance(node.output, Process):
sig_out = Signal(shape=node.size_out, name="%s.out" % node)
model.build(node.output, sig_in, sig_out, mode="set")
elif callable(node.output):
sig_out = (
Signal(shape=node.size_out, name="%s.out" % node)
if node.size_out > 0
else None
)
model.add_op(SimPyFunc(output=sig_out, fn=node.output, t=model.time, x=sig_in))
elif is_array_like(node.output):
sig_out = Signal(node.output.astype(rc.float_dtype), name="%s.out" % node)
else:
raise BuildError("Invalid node output type %r" % type(node.output).__name__)
model.sig[node]["in"] = sig_in
model.sig[node]["out"] = sig_out
model.params[node] = None
|
py | b40e16b8889735ae6355dbdfd805ea68a7e8b49e |
def from_mmtf_MMTFDecoder(item, molecular_system=None, atom_indices='all', frame_indices='all'):
from molsysmt.native.trajectory import Trajectory
from molsysmt.forms.api_mmtf_MMTFDecoder import get_frame_from_atom
tmp_item = Trajectory()
step, time, coordinates, box = get_frame_from_atom(item, indices=atom_indices, frame_indices=frame_indices)
tmp_item.append_frames(step=step, time=time, coordinates=coordinates, box=box)
if molecular_system is not None:
tmp_molecular_system = molecular_system.combine_with_items(tmp_item, atom_indices=atom_indices, frame_indices=frame_indices)
else:
tmp_molecular_system = None
return tmp_item, tmp_molecular_system
|
py | b40e1744497eca9fdb231624cb19027c0186ba89 | # -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.plotly as py
import plotly.graph_objs as go
import networkx as nx
import pickle
import boto3
import io
import numpy
import pandas
############################
# Load data
BUCKET_NAME = 'blog-seq-data' # replace with your bucket name
# list of topics
KEY = 'graph_and_labels' # replace with your object key
s3 = boto3.client('s3')
obj = s3.get_object(Bucket=BUCKET_NAME, Key=KEY)
obj_str = obj['Body'].read()
graph_mat,topic_labels,dist_mat,doc_topic_mat = pickle.loads(obj_str)
topic_list = list(topic_labels.values())
# article info
KEY = 'article_info' # replace with your object key
s3 = boto3.client('s3')
obj = s3.get_object(Bucket=BUCKET_NAME, Key=KEY)
obj_str = obj['Body'].read()
adf = pickle.loads(obj_str)
# topic sequence
KEY = 'topic_seq' # replace with your object key
s3 = boto3.client('s3')
obj = s3.get_object(Bucket=BUCKET_NAME, Key=KEY)
obj_str = obj['Body'].read()
topic_sequence = pickle.loads(obj_str)
############################
app = dash.Dash()
server = app.server
###########################
# Layout dict
layout = dict(
autosize=True,
height=500,
font=dict(color="#191A1A"),
titlefont=dict(color="#191A1A", size='14'),
margin=dict(
l=35,
r=35,
b=35,
t=45
),
hovermode="closest",
plot_bgcolor='#fffcfc',
paper_bgcolor='#fffcfc',
legend=dict(font=dict(size=10), orientation='h'),
title='Each dot is an NYC Middle School eligible for SONYC funding',
)
###########################
# Layout functions
demo_slides = "https://docs.google.com/presentation/d/e/2PACX-1vTomIS7JWIKKHgNcO_Lh8uvxhAjREwyKYGEUPKTAyqkgpsrH00pdP7FBZsPSUWpT6txcI6tZdsYjniw/pub?start=false&loop=false&delayms=3000"
# Navbar
def get_menu():
menu = html.Div([
dcc.Link('Overview ', href='/overview', className="tab first"),
dcc.Link('Price Performance ', href='/price-performance', className="tab"),
dcc.Link('Portfolio & Management ', href='/portfolio-management', className="tab"),
dcc.Link('Fees & Minimums ', href='/fees', className="tab"),
dcc.Link('Distributions ', href='/distributions', className="tab"),
dcc.Link('News & Reviews ', href='/news-and-reviews', className="tab")
], className="row ")
return menu
############################
# Page layouts
overview = html.Div([ # page 1
html.Div([
# Header
get_menu(),
], className="row "),
], className="page")
pricePerformance = html.Div(children=[
html.H1(children='Blog curator'),
html.Div(children='''
Pick a topic you'd like to learn about:
'''),
html.Div(id='output-container',style={'padding': 10}),
html.Div(children='''
Select where you want to start and finish your reading:
'''),
    html.Div(id='output-container-2',style={'padding': 10}),
html.Div(id='my-datatable'),
html.Div(
[
html.Div(
[
dcc.Dropdown(
id='my-dropdown',
options=[{'label':topic, 'value':topic_no} for topic_no, topic in enumerate(topic_list)],
value=0)
], className = "two columns"
),
html.Div(
[
],
className="ten columns"
),
html.Div(
[
],className="twelve columns")
], className="row"
)
])
############################
# Layout section
app.layout = html.Div([
dcc.Location(id='url', refresh=False),
html.Div(id='page-content')
])
##############################
# Callbacks
'''
@app.callback(
dash.dependencies.Output('my-datatable', 'children'),
[dash.dependencies.Input('my-dropdown', 'value')])
def update_rows(selected_value):
output_arr = []
for doc_no, topic in enumerate(topic_sequence[selected_value]):
if doc_no != 0 and topic == selected_value:
continue
else:
topic = int(topic)
test_str = adf.iloc[topic]['title'] + '. ' + adf.iloc[topic]['author'] + ' ' + adf.iloc[topic]['link'] + ' ' + adf.iloc[topic]['source']
output_arr.append(html.H3(topic_list[int(topic)]))
output_arr.append(html.P(test_str))
return output_arr
'''
# Update page
@app.callback(dash.dependencies.Output('page-content', 'children'),
[dash.dependencies.Input('url', 'pathname')])
def display_page(pathname):
if pathname == '/' or pathname == '/overview':
return overview
elif pathname == '/price-performance':
return pricePerformance
elif pathname == '/portfolio-management':
return portfolioManagement
elif pathname == '/fees':
return feesMins
elif pathname == '/distributions':
return distributions
elif pathname == '/news-and-reviews':
return newsReviews
elif pathname == '/full-view':
return overview,pricePerformance,portfolioManagement,feesMins,distributions,newsReviews
else:
return noPage
##############################
'''
external_css = ["https://fonts.googleapis.com/css?family=Product+Sans:400,400i,700,700i",
"https://cdn.rawgit.com/plotly/dash-app-stylesheets/2cc54b8c03f4126569a3440aae611bbef1d7a5dd/stylesheet.css"]
for css in external_css:
app.css.append_css({"external_url": css})
'''
# Boostrap CSS.
app.css.append_css({'external_url': 'https://cdn.rawgit.com/plotly/dash-app-stylesheets/2d266c578d2a6e8850ebce48fdb52759b2aef506/stylesheet-oil-and-gas.css'})
if __name__ == '__main__':
app.run_server(debug=True) |
py | b40e17a19ec3b7aabd56722bc47c1bd2ed6bc991 | import numpy as np
import torch as th
from torch import optim
import torch.nn.functional as F
import mne
from mne.io import concatenate_raws
from braindecode.util import var_to_np, np_to_var
from braindecode.datautil import SignalAndTarget
from braindecode.models import ShallowFBCSPNet
from braindecode.models.util import to_dense_prediction_model
from braindecode.util import set_random_seeds
from braindecode.experiments.monitors import compute_preds_per_trial_from_crops
from braindecode.datautil.iterators import CropsFromTrialsIterator
def test_cropped_decoding():
    # 5, 6, 9, 10, 13, 14 are codes for executed and imagined hands/feet
subject_id = 1
event_codes = [5, 6, 9, 10, 13, 14]
# This will download the files if you don't have them yet,
# and then return the paths to the files.
physionet_paths = mne.datasets.eegbci.load_data(subject_id, event_codes, update_path=True)
# Load each of the files
parts = [mne.io.read_raw_edf(path, preload=True, stim_channel='auto',
verbose='WARNING')
for path in physionet_paths]
# Concatenate them
raw = concatenate_raws(parts)
# Find the events in this dataset
events, _ = mne.events_from_annotations(raw)
# Use only EEG channels
eeg_channel_inds = mne.pick_types(raw.info, meg=False, eeg=True, stim=False,
eog=False,
exclude='bads')
# Extract trials, only using EEG channels
epoched = mne.Epochs(raw, events, dict(hands=2, feet=3), tmin=1, tmax=4.1,
proj=False, picks=eeg_channel_inds,
baseline=None, preload=True)
    # Convert data from volt to microvolt
# Pytorch expects float32 for input and int64 for labels.
X = (epoched.get_data() * 1e6).astype(np.float32)
y = (epoched.events[:, 2] - 2).astype(np.int64) # 2,3 -> 0,1
train_set = SignalAndTarget(X[:60], y=y[:60])
test_set = SignalAndTarget(X[60:], y=y[60:])
# Set if you want to use GPU
# You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
cuda = False
set_random_seeds(seed=20170629, cuda=cuda)
# This will determine how many crops are processed in parallel
input_time_length = 450
n_classes = 2
in_chans = train_set.X.shape[1]
# final_conv_length determines the size of the receptive field of the ConvNet
model = ShallowFBCSPNet(in_chans=in_chans, n_classes=n_classes,
input_time_length=input_time_length,
final_conv_length=12).create_network()
to_dense_prediction_model(model)
if cuda:
model.cuda()
optimizer = optim.Adam(model.parameters())
# determine output size
test_input = np_to_var(
np.ones((2, in_chans, input_time_length, 1), dtype=np.float32))
if cuda:
test_input = test_input.cuda()
out = model(test_input)
n_preds_per_input = out.cpu().data.numpy().shape[2]
print("{:d} predictions per input/trial".format(n_preds_per_input))
iterator = CropsFromTrialsIterator(batch_size=32,
input_time_length=input_time_length,
n_preds_per_input=n_preds_per_input)
losses = []
accuracies = []
for i_epoch in range(4):
# Set model to training mode
model.train()
for batch_X, batch_y in iterator.get_batches(train_set, shuffle=False):
net_in = np_to_var(batch_X)
if cuda:
net_in = net_in.cuda()
net_target = np_to_var(batch_y)
if cuda:
net_target = net_target.cuda()
# Remove gradients of last backward pass from all parameters
optimizer.zero_grad()
outputs = model(net_in)
# Mean predictions across trial
# Note that this will give identical gradients to computing
# a per-prediction loss (at least for the combination of log softmax activation
# and negative log likelihood loss which we are using here)
outputs = th.mean(outputs, dim=2, keepdim=False)
loss = F.nll_loss(outputs, net_target)
loss.backward()
optimizer.step()
# Print some statistics each epoch
model.eval()
print("Epoch {:d}".format(i_epoch))
for setname, dataset in (('Train', train_set), ('Test', test_set)):
# Collect all predictions and losses
all_preds = []
all_losses = []
batch_sizes = []
for batch_X, batch_y in iterator.get_batches(dataset,
shuffle=False):
net_in = np_to_var(batch_X)
if cuda:
net_in = net_in.cuda()
net_target = np_to_var(batch_y)
if cuda:
net_target = net_target.cuda()
outputs = model(net_in)
all_preds.append(var_to_np(outputs))
outputs = th.mean(outputs, dim=2, keepdim=False)
loss = F.nll_loss(outputs, net_target)
loss = float(var_to_np(loss))
all_losses.append(loss)
batch_sizes.append(len(batch_X))
# Compute mean per-input loss
loss = np.mean(np.array(all_losses) * np.array(batch_sizes) /
np.mean(batch_sizes))
print("{:6s} Loss: {:.5f}".format(setname, loss))
losses.append(loss)
# Assign the predictions to the trials
preds_per_trial = compute_preds_per_trial_from_crops(all_preds,
input_time_length,
dataset.X)
# preds per trial are now trials x classes x timesteps/predictions
# Now mean across timesteps for each trial to get per-trial predictions
meaned_preds_per_trial = np.array(
[np.mean(p, axis=1) for p in preds_per_trial])
predicted_labels = np.argmax(meaned_preds_per_trial, axis=1)
accuracy = np.mean(predicted_labels == dataset.y)
accuracies.append(accuracy * 100)
print("{:6s} Accuracy: {:.1f}%".format(
setname, accuracy * 100))
np.testing.assert_allclose(
np.array(losses),
np.array([1.31657708,
1.73548156,
1.02950428,
1.43932164,
0.78677772,
1.12382019,
0.55920881,
0.87277424]),
rtol=1e-4, atol=1e-5)
np.testing.assert_allclose(
np.array(accuracies),
np.array(
[50.,
46.66666667,
50.,
46.66666667,
50.,
46.66666667,
66.66666667,
50.]
),
rtol=1e-4, atol=1e-5)
|
py | b40e17b4420c06a053d2778e6ffe298f39747d90 | ### SIMPLE IMAGE DOWNLOADER
import httpx
from PIL import Image
import argparse
import os
import sys
from io import BytesIO
class JPGDL:
# MAIN DOWNLOADER FUNCTION
@staticmethod
def download(download_url, filename, output_folder=os.getcwd(), print_log=True):
"""
Download an IMAGE with a JPEG output.
download_url => Url link of the image file.
filename => Filename to be downloaded.
output_folder => Where to store the image. Defaults to current directory.
print_log => Print output information log.
"""
# check folder
if not os.path.exists(output_folder):
            os.makedirs(output_folder)  # make the folder if it doesn't exist
img_file = f"{filename}"
# check if the set filename in the args has `.jpg` with it
if not filename.strip().endswith(".jpg"):
img_file = f"{filename}.jpg" # filename
output_file = os.path.join(output_folder, img_file) # output file dir
# check if the filename exists in the directory
        if os.path.exists(output_file):
# print only if set to TRUE
if print_log:
print(
f"\n ![Err] Filename `{img_file}` already exists at folder `{output_folder}`."
)
sys.exit(1) # exit the app
file = ""
# start download
try:
# print only if set to TRUE
if print_log:
print(f"\n Downloading Image: `{img_file}` from > {download_url}")
file = httpx.get(download_url, timeout=None)
except httpx.ConnectError:
# print only if set to TRUE
if print_log:
print(
f"\n ![NetErr] The download url doesn't seem to exist or you are not connected to the internet."
)
sys.exit(1)
# try to convert the content to jpeg
try:
# print only if set to TRUE
if print_log:
print("\n Converting image to JPEG...")
image = ""
# some .png images have transparency,
# replace the background with white color
# based from :> https://stackoverflow.com/questions/9166400/convert-rgba-png-to-rgb-with-pil
if ".png" in download_url:
rsp = Image.open(BytesIO(file.content)).convert("RGBA")
image = Image.new("RGB", size=rsp.size, color=(255, 255, 255, 255))
image.paste(rsp, mask=rsp.split()[3])
else:
image = Image.open(BytesIO(file.content)).convert("RGB")
# save the image file
image.save(
output_file,
"jpeg", # save as jpeg,
                quality=100,  # some images become blurry otherwise, so keep full quality
)
except Exception as e:
print(e)
# print only if set to TRUE
if print_log:
print(
"\n ![ConvErr] There was a problem while trying to save and convert the image to JPEG."
)
sys.exit(1)
# print done message
# print only if set to TRUE
if print_log:
print(
f"\n Image has been successfully downloaded.\n\tSaved to => {output_file}"
)
# this will run for the cli
def cli():
# Initiate the parser
parser = argparse.ArgumentParser()
# set parser arguments
parser.add_argument(
"-u",
"--url",
help="Download url / the link of the image. It must start with `https://` or `http://`",
required=True,
type=str,
)
parser.add_argument(
"-f",
"--filename",
help="Set filename of the image. With or without `.jpg`",
required=True,
type=str,
)
parser.add_argument(
"-o",
"--output",
help="Where to store the image. Default is the current directory",
type=str,
default=os.getcwd(),
)
parser.add_argument(
"-p",
"--print",
help="Print process. [true or false]. Defaults to true",
type=str,
default="true",
)
# check if there are arguments specified
# based from: https://stackoverflow.com/questions/4042452/display-help-message-with-python-argparse-when-script-is-called-without-any-argu
if len(sys.argv) == 1:
parser.print_help(sys.stderr)
sys.exit(1)
args = parser.parse_args()
# check and get each argument
if args.url and args.filename:
pl = False
if str(args.print).lower() == "true": # if parameter is `true`
pl = True
# start downloading
JPGDL.download(
download_url=args.url,
filename=args.filename,
output_folder=args.output,
print_log=pl,
) # download the image with the handler
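# Hedged usage sketch (not part of the original script): the URL below is a
# placeholder, not a real image. The downloader can also be used programmatically:
#
#   JPGDL.download(
#       "https://example.com/some-image.png",  # placeholder URL
#       "some-image",                          # saved as some-image.jpg
#       output_folder="downloads",
#   )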
|
py | b40e17c302ac55a3959988575e0bdc28e039df4e | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from os import environ
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import Session, ApplicationRunner
class Component(Session):
"""
An application component calling the different backend procedures.
"""
def onJoin(self, details):
print("session attached {}".format(details))
return self.leave()
if __name__ == '__main__':
runner = ApplicationRunner(
environ.get("AUTOBAHN_DEMO_ROUTER", u"ws://127.0.0.1:8080/auth_ws"),
u"crossbardemo",
)
def make(config):
session = Component(config)
session.add_authenticator(
u'cryptosign',
authid=u'alice',
privkey=u'6e3a302aa67d55ffc2059efeb5cf679470b37a26ae9ac18693b56ea3d0cd331c',
)
return session
runner.run(make)
|
py | b40e18d87826827603478b92900c3ae06448068e | # Generated by Django 2.2.6 on 2019-12-10 08:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Exam',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('total_duration', models.DurationField(default=30)),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('total_duration', models.DurationField(default=30)),
],
),
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='QuestionGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('exam', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exam.Exam')),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exam.Question')),
],
),
migrations.AddField(
model_name='question',
name='topic',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exam.Topic'),
),
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('choice_text', models.CharField(max_length=200)),
('is_right_answer', models.BooleanField(default=False)),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='exam.Question')),
],
),
]
|
py | b40e196421505b4ec1796397d86daf38d50d6a33 | from collections import OrderedDict
import pytest
import numpy as np
from test_base import VerosPyOMUnitTest
from veros.core import momentum, external, numerics
class MomentumTest(VerosPyOMUnitTest):
nx, ny, nz = 70, 60, 50
extra_settings = {
"coord_degree": True,
"enable_cyclic_x": True,
"enable_conserve_energy": True,
"enable_bottom_friction_var": True,
"enable_hor_friction_cos_scaling": True,
"enable_implicit_vert_friction": True,
"enable_explicit_vert_friction": True,
"enable_TEM_friction": True,
"enable_hor_friction": True,
"enable_biharmonic_friction": True,
"enable_ray_friction": True,
"enable_bottom_friction": True,
"enable_quadratic_bottom_friction": True,
"enable_momentum_sources": True,
"congr_epsilon": 1e-12,
"congr_max_iterations": 10000,
}
first = True
def initialize(self):
self.set_attribute("hor_friction_cosPower", np.random.randint(1, 5))
for a in ("dt_mom", "r_bot", "r_quad_bot", "A_h", "A_hbi", "AB_eps", "x_origin", "y_origin"):
self.set_attribute(a, np.random.rand())
for a in ("dxt", ):
self.set_attribute(a, 0.1 * np.ones(self.nx + 4) + 0.01 * np.random.rand(self.nx + 4))
for a in ("dyt", ):
self.set_attribute(a, 0.1 * np.ones(self.ny + 4) + 0.01 * np.random.rand(self.ny + 4))
for a in ("dzt", ):
self.set_attribute(a, np.random.rand(self.nz))
for a in ("r_bot_var_u", "r_bot_var_v", "surface_taux", "surface_tauy", "coriolis_t", "coriolis_h"):
self.set_attribute(a, np.random.randn(self.nx + 4, self.ny + 4))
for a in ("K_diss_v", "kappaM", "flux_north", "flux_east", "flux_top", "K_diss_bot", "K_diss_h",
"du_mix", "dv_mix", "u_source", "v_source", "du_adv", "dv_adv"):
self.set_attribute(a, np.random.randn(self.nx + 4, self.ny + 4, self.nz))
for a in ("u", "v", "w", "du", "dv"):
self.set_attribute(a, np.random.randn(self.nx + 4, self.ny + 4, self.nz, 3))
kbot = np.random.randint(1, self.nz, size=(self.nx + 4, self.ny + 4))
# add some islands, but avoid boundaries
kbot[3:-3, 3:-3].flat[np.random.randint(0, (self.nx - 2) * (self.ny - 2), size=10)] = 0
self.set_attribute("kbot", kbot)
numerics.calc_grid(self.veros_new)
numerics.calc_topo(self.veros_new)
self.veros_legacy.call_fortran_routine("calc_grid")
self.veros_legacy.call_fortran_routine("calc_topo")
if self.first:
external.streamfunction_init(self.veros_new)
self.veros_legacy.call_fortran_routine("streamfunction_init")
self.first = False
self.test_module = momentum
veros_args = (self.veros_new, )
veros_legacy_args = dict()
self.test_routines = OrderedDict()
self.test_routines["momentum_advection"] = (veros_args, veros_legacy_args)
self.test_routines["vertical_velocity"] = (veros_args, veros_legacy_args)
self.test_routines["momentum"] = (veros_args, veros_legacy_args)
def test_passed(self, routine):
for f in ("flux_east", "flux_north", "flux_top", "u", "v", "w", "K_diss_v", "du_adv", "dv_adv", "du", "dv",
"K_diss_bot", "du_mix", "dv_mix", "psi", "dpsi", "du_cor", "dv_cor"):
self.check_variable(f)
for f in ("K_diss_h", ):
self.check_variable(f, atol=1e-7)
@pytest.mark.pyom
def test_momentum(pyom2_lib, backend):
MomentumTest(fortran=pyom2_lib, backend=backend).run()
|
py | b40e19acf1b4dd8f31603f404855f4f9656ebdb6 | from .api import API
from .const import (
DEFAULT_KW_ROUND_PERSICION,
SUPPORTED_OPERATION_MODES,
DeviceType,
GridState,
GridStatus,
LineStatus,
MeterType,
OperationMode,
Roles,
SyncType,
User,
)
from .error import (
AccessDeniedError,
APIError,
MissingAttributeError,
PowerwallError,
PowerwallUnreachableError,
)
from .helpers import assert_attribute, convert_to_kw
from .responses import (
LoginResponse,
Meter,
MetersAggregates,
PowerwallStatus,
SiteInfo,
SiteMaster,
Solar,
)
from .powerwall import Powerwall
VERSION = "0.3.6"
|
py | b40e1a4c3e01d85d8a8c6a56eca9e37bceb15154 | # Copyright (c) 2016 Pontianak
import re
import itertools
# Sums a list
def listsum(numList):
theSum = 0
for numTemp in numList:
theSum = theSum + numTemp
return theSum
assTxt = open('regex_sum_190143.txt')
numsLst = list()
# Extract the numbers from each line into a list, skipping lines with no numbers
for line in assTxt:
    line = line.rstrip()
    temp = re.findall('[0-9]+',line)
    if temp:
        numsLst.append(temp)
# Convert list of strings to ints
numsLst = [map(int,i) for i in numsLst]
print "\nnumsLst Output:\n" , numsLst , "\n\n"
# Flatten the list of lists into a single list
merged = list(itertools.chain(*numsLst))
print "numsLst Merged:\n" , merged , "\n\n"
print "Sum of regex_sum_190143.txt:" , listsum(merged)
|
py | b40e1ab73db313b251f87cf446864be21d510434 | """
Authors:
Oguz Paksoy - 150150111
Merve Elif Demirtas - 150160706
Date:
18.3.2019
"""
from socket import *
import threading
import json
import os
questions = [
{
'id': 1,
'question': 'Which one of the following can be classified as an access network?',
'options': [
'A Web server',
'A personal computer which runs an application that connects to the Web',
'Residential Wi-Fi',
'End-point router of an international ISP connecting to a regional ISP'
]
},
{
'id': 2,
'question': 'Which one is true for both Peer-to-Peer and Server-Client architectures?',
'options': [
'A host with a static IP that transfers data is necessary for all clients',
'Clients may come and go, and can also change IPs',
'There always exists a mediator between two client end systems',
'Multiple clients can directly connect to each other'
]
},
{
'id': 3,
'question': 'Which one of the following is an electronic mail protocol?',
'options': [
'SMTP',
'HTTP',
'FTP',
'RFC'
]
},
{
'id': 4,
'question': 'What is the main difference between UDP and TCP?',
'options': [
'TCP is a faster and more reliable version of UDP',
'TCP is connection oriented, UDP is connectionless',
'UDP is a faster and more reliable version of TCP',
'UDP is connection oriented, TCP is connectionless'
]
},
{
'id': 5,
'question': 'Which one of the following fields is not present on both TCP and UDP datagrams?',
'options': [
"Sender's Port Number",
"Receiver's Port Number",
'Checksum',
'Acknowledgement Number'
]
}
]
answers = [2, 1, 0, 1, 3]
class ThreadedServer:
def listenToClient(self, client, addr):
try:
client.sendall('Please enter a username.'.encode())
user = client.recv(1024).decode()
userScores = [score for score in self.scores if score.startswith(user)]
prevScores = ''.join(['{}) {}'.format(i + 1, result.split(': ', 1)[1])
for i, result in zip(range(5), userScores)])
client.sendall('Hello, {}! Your previous results are as follows:\n\n{}'.format(user, prevScores).encode())
mode = client.recv(1024).decode()
if mode == 'exit':
client.close()
elif mode == 'begin':
score = 0
userAnswers = []
client.sendall(str(len(questions)).encode())
if int(client.recv(1024).decode()) != len(questions):
client.close()
exit(1)
for i in range(len(questions)):
client.sendall(json.dumps(questions[i]).encode())
answer = int(client.recv(1024).decode())
userAnswers.append(answer)
if answer == answers[i]:
score += 1
client.sendall('Correct answer.'.encode())
else:
client.sendall('Incorrect answer.'.encode())
client.sendall('Quiz complete. You scored {} / {}'.format(score, len(questions)).encode())
client.close()
answersString = ', '.join(['({}: {:2d})'.format(i + 1, ans) for i, ans in enumerate(userAnswers)])
self.scores.insert(0, '{}: {} / {} [Answers: {}]\n'.format(user, score, len(questions), answersString))
else:
client.close()
exit(1)
except:
print('Connection to {} was aborted before the quiz ended.'.format(addr))
exit(1)
exit(0)
def __init__(self, scores):
self.scores = scores
def startOnPort(self, serverPort):
try:
serverSocket = socket(AF_INET, SOCK_STREAM)
except:
print("Socket cannot be created!!!")
exit(1)
print("Socket is created...")
try:
serverSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
except:
print("Socket cannot be used!!!")
exit(1)
print("Socket is being used...")
try:
serverSocket.bind(('', serverPort))
except:
print("Binding cannot de done!!!")
exit(1)
print("Binding is done...")
try:
serverSocket.listen(45)
except:
print("Server cannot listen!!!")
exit(1)
print("The server is ready to receive")
while True:
connectionSocket = None
try:
serverSocket.settimeout(10)
connectionSocket, addr = serverSocket.accept()
threading.Thread(target=self.listenToClient, args=(connectionSocket, addr), daemon=True).start()
except timeout:
continue
except KeyboardInterrupt:
if connectionSocket:
connectionSocket.close()
serverSocket.close()
break
def getScores(self):
return self.scores
if __name__ == '__main__':
serverPort = 12000
if os.path.exists('results.txt'):
with open('results.txt', 'r') as readResultsFile:
results = readResultsFile.readlines()
else:
results = []
server = ThreadedServer(results)
try:
server.startOnPort(serverPort)
except KeyboardInterrupt:
scores = server.getScores()
with open('results.txt', 'w') as writeResultsFile:
writeResultsFile.writelines(scores)
|
py | b40e1abe7e61a2eab184481a6a2fb5f1d1a0baad | import sys, os
import json
import boto3
from helper import AwsHelper
import datetime
import pickle
class MetadataClient:
def __init__(self, targetArn, targetType="sns", region=None, body=None):
metadataType = "generic"
if region == None:
region = os.environ.get('AWS_REGION')
if targetType not in ['sns', 'lambda']:
raise ValueError("MetadataClient does not accept targets of type {}".format(targetType))
self._region = region
self._targetArn = targetArn
self._targetType = targetType
self._client = AwsHelper().getClient(self._targetType, awsRegion=self._region)
self._metadataType = metadataType
self._requiredKeys = set()
if body and not isinstance(body, dict):
raise ValueError("'body' has to be a valid dictionary")
elif body == None:
self._body = {}
else:
self._body = body
@property
def body(self):
return self._body
@property
def targetArn(self):
return self._targetArn
@property
def targetType(self):
return self._targetType
@property
def client(self):
return self._client
@property
def requiredKeys(self):
return self._requiredKeys
@property
def metadataType(self):
return self._metadataType
@requiredKeys.setter
def requiredKeys(self, value):
self._requiredKeys = value
@body.setter
def body(self, value):
self._body = value
def _validate_payload(self, payload):
if not self.requiredKeys.issubset(set(payload.keys())):
return False
return True
def _publishSNS(self, message, messageGroupId, messageAttributes):
print("publishing to SNS")
try:
print(self.client.publish(
TopicArn = self.targetArn,
Message = message,
MessageGroupId = messageGroupId,
MessageAttributes = {
'metadataType': {
'DataType' : 'String',
'StringValue': self.metadataType
}
}
))
except Exception as e:
print(e)
raise Exception("Unable to publish to topic {}".format(self.targetArn))
def publish(self, body, subsetKeys=[]):
timestamp = str(datetime.datetime.utcnow())
# Merge existing body with incoming body
payload = {"timestamp": timestamp, **self.body, **body}
valid = self._validate_payload(payload)
if not valid:
raise ValueError("Incorrect client payload structure. Please double check the required keys!")
documentId = payload['documentId']
payloadJson = json.dumps(payload)
if self.targetType == 'lambda':
raise ValueError("Not implemented")
elif self.targetType == 'sns':
messageAttributes = {
'metadataType': {
'DataType' : 'String',
'StringValue': self.metadataType
}
}
self._publishSNS(payloadJson, documentId, messageAttributes)
else:
raise ValueError("Invalid targetType")
class PipelineOperationsClient(MetadataClient):
def __init__(self, targetArn, region=None, targetType="sns", body=None):
super().__init__(targetArn, targetType, region, body)
self._metadataType = "pipeline-operations"
self._requiredKeys = {"documentId", "bucketName", "objectName", "status", "stage"}
def initDoc(self):
super().publish({
"status" : "IN_PROGRESS",
"initDoc" : "True"
})
def stageInProgress(self, message=None):
if message:
super().publish({
"status" : "IN_PROGRESS",
"message" : message
})
else:
super().publish({
"status" : "IN_PROGRESS",
})
def stageSucceeded(self, message=None):
if message:
super().publish({
"status" : "SUCCEEDED",
"message" : message
})
else:
super().publish({
"status" : "SUCCEEDED"
})
def stageFailed(self, message=None):
if message:
super().publish({
"status" : "FAILED",
"message" : message
})
else:
super().publish({
"status" : "FAILED"
})
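# Hedged usage sketch (not part of the original module): the topic ARN and document
# fields below are placeholders. A pipeline stage could report its status like this:
#
#   ops = PipelineOperationsClient(
#       targetArn="arn:aws:sns:us-east-1:123456789012:placeholder-topic",
#       body={"documentId": "doc-1", "bucketName": "my-bucket",
#             "objectName": "scan.pdf", "stage": "ocr"})
#   ops.initDoc()
#   ops.stageInProgress("OCR started")
#   ops.stageSucceeded()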
class DocumentLineageClient(MetadataClient):
def __init__(self, targetArn, region=None, targetType="sns", body=None):
super().__init__(targetArn, targetType, region, body)
self._metadataType = "document-lineage"
self._requiredKeys = {"documentId", "callerId", "targetBucketName", "targetFileName", "s3Event"}
def recordLineage(self, body):
print("Recording Lineage")
print(body)
if 's3Event' not in body:
super().publish({"s3Event": "ObjectCreated:Put", **body})
else:
super().publish(body)
def recordLineageOfCopy(self, body):
print("Recording Lineage of S3 Copy")
print(body)
super().publish({"s3Event": "ObjectCreated:Copy", **body})
class DocumentRegistryClient(MetadataClient):
def __init__(self, targetArn, region=None, targetType="sns", body=None):
super().__init__(targetArn, targetType, region, body)
self._metadataType = "document-registry"
self._requiredKeys = {"documentId", "bucketName", "documentName", "documentLink", "principalIAMWriter"}
def registerDocument(self, body):
print(body)
super().publish(body) |
py | b40e1b7df51f7cdbb076fbeaccc25c90b2590209 | ############################################################
#
# Autogenerated by the KBase type compiler -
# any changes made here will be overwritten
#
# Passes on URLError, timeout, and BadStatusLine exceptions.
# See:
# http://docs.python.org/2/library/urllib2.html
# http://docs.python.org/2/library/httplib.html
#
############################################################
try:
import json
except ImportError:
import sys
sys.path.append('simplejson-2.3.3')
import simplejson as json
import urllib2
import httplib
import urlparse
import random
import base64
import httplib2
from urllib2 import URLError, HTTPError
from ConfigParser import ConfigParser
import os
_CT = 'content-type'
_AJ = 'application/json'
_URL_SCHEME = frozenset(['http', 'https'])
def _get_token(user_id, password,
auth_svc='https://nexus.api.globusonline.org/goauth/token?' +
'grant_type=client_credentials'):
# This is bandaid helper function until we get a full
# KBase python auth client released
h = httplib2.Http(disable_ssl_certificate_validation=True)
auth = base64.encodestring(user_id + ':' + password)
headers = {'Authorization': 'Basic ' + auth}
h.add_credentials(user_id, password)
h.follow_all_redirects = True
url = auth_svc
resp, content = h.request(url, 'GET', headers=headers)
status = int(resp['status'])
if status >= 200 and status <= 299:
tok = json.loads(content)
elif status == 403:
raise Exception('Authentication failed: Bad user_id/password ' +
'combination %s:%s' % (user_id, password))
else:
raise Exception(str(resp))
return tok['access_token']
def _read_rcfile(file=os.environ['HOME'] + '/.authrc'): # @ReservedAssignment
# Another bandaid to read in the ~/.authrc file if one is present
authdata = None
if os.path.exists(file):
try:
with open(file) as authrc:
rawdata = json.load(authrc)
# strip down whatever we read to only what is legit
authdata = {x: rawdata.get(x) for x in (
'user_id', 'token', 'client_secret', 'keyfile',
'keyfile_passphrase', 'password')}
except Exception, e:
print "Error while reading authrc file %s: %s" % (file, e)
return authdata
def _read_inifile(file=os.environ.get( # @ReservedAssignment
'KB_DEPLOYMENT_CONFIG', os.environ['HOME'] +
'/.kbase_config')):
# Another bandaid to read in the ~/.kbase_config file if one is present
authdata = None
if os.path.exists(file):
try:
config = ConfigParser()
config.read(file)
# strip down whatever we read to only what is legit
authdata = {x: config.get('authentication', x)
if config.has_option('authentication', x)
else None for x in
('user_id', 'token', 'client_secret',
'keyfile', 'keyfile_passphrase', 'password')}
except Exception, e:
print "Error while reading INI file %s: %s" % (file, e)
return authdata
class ServerError(Exception):
def __init__(self, name, code, message, data=None, error=None):
self.name = name
self.code = code
self.message = '' if message is None else message
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
return json.JSONEncoder.default(self, obj)
class Ontology(object):
def __init__(self, url=None, timeout=30 * 60, user_id=None,
password=None, token=None, ignore_authrc=False):
if url is None:
raise ValueError('A url is required')
scheme, _, _, _, _, _ = urlparse.urlparse(url)
if scheme not in _URL_SCHEME:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
# token overrides user_id and password
if token is not None:
self._headers['AUTHORIZATION'] = token
elif user_id is not None and password is not None:
self._headers['AUTHORIZATION'] = _get_token(user_id, password)
elif 'KB_AUTH_TOKEN' in os.environ:
self._headers['AUTHORIZATION'] = os.environ.get('KB_AUTH_TOKEN')
elif not ignore_authrc:
authdata = _read_inifile()
if authdata is None:
authdata = _read_rcfile()
if authdata is not None:
if authdata.get('token') is not None:
self._headers['AUTHORIZATION'] = authdata['token']
elif(authdata.get('user_id') is not None
and authdata.get('password') is not None):
self._headers['AUTHORIZATION'] = _get_token(
authdata['user_id'], authdata['password'])
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
def get_goidlist(self, geneIDList, domainList, ecList):
arg_hash = {'method': 'Ontology.get_goidlist',
'params': [geneIDList, domainList, ecList],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_go_description(self, goIDList):
arg_hash = {'method': 'Ontology.get_go_description',
'params': [goIDList],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_go_enrichment(self, geneIDList, domainList, ecList, type, ontologytype):
arg_hash = {'method': 'Ontology.get_go_enrichment',
'params': [geneIDList, domainList, ecList, type, ontologytype],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def get_go_annotation(self, geneIDList):
arg_hash = {'method': 'Ontology.get_go_annotation',
'params': [geneIDList],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
def association_test(self, gene_list, ws_name, in_obj_id, out_obj_id, type, correction_method, cut_off):
arg_hash = {'method': 'Ontology.association_test',
'params': [gene_list, ws_name, in_obj_id, out_obj_id, type, correction_method, cut_off],
'version': '1.1',
'id': str(random.random())[2:]
}
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
try:
request = urllib2.Request(self.url, body, self._headers)
ret = urllib2.urlopen(request, timeout=self.timeout)
except HTTPError as h:
if _CT in h.headers and h.headers[_CT] == _AJ:
b = h.read()
err = json.loads(b)
if 'error' in err:
raise ServerError(**err['error'])
else: # this should never happen... but if it does
se = ServerError('Unknown', 0, b)
se.httpError = h
# h.read() will return '' in the calling code.
raise se
else:
raise h
if ret.code != httplib.OK:
raise URLError('Received bad response code from server:' +
ret.code)
resp = json.loads(ret.read())
if 'result' in resp:
return resp['result'][0]
else:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
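# Hedged usage sketch (not part of the generated client): the service URL, token and
# identifiers below are placeholders, not real endpoints or data.
#
#   client = Ontology(url="https://kbase.example.org/ontology_service", token=my_token)
#   go_map = client.get_goidlist(["example_gene_id"], [], [])
#   descriptions = client.get_go_description(["GO:0008150"])  # illustrative GO id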
|
py | b40e1c1a53e1acfee86e33294bc307dafc221b36 | ###
|
py | b40e1c314f7736f8b4e19f46d0c6bfa91e0af1a9 | from datetime import datetime
from django.test import TestCase
from monitoring.models import TyreMeasurement
from monitoring.serializers import TyreMeasurementSerializer
from monitoring.tests.fixtures import load_tyre_measurements
class TyreMeasurementSerializerTestCase(TestCase):
def test_representation(self):
load_tyre_measurements()
measurement = TyreMeasurement.objects.all().first()
serializer_data = TyreMeasurementSerializer(measurement).data
self.assertEqual(datetime.strptime(serializer_data['timestamp'], '%Y-%m-%dT%H:%M:%S'), measurement.timestamp)
expected_fields = [
'id',
'position',
'pressure',
'temperature',
'omega',
'speed',
'car',
]
for field in expected_fields:
self.assertEqual(serializer_data[field], getattr(measurement, field))
|
py | b40e1cae29e09b8c0f183b3026e06fa8a7ccf505 | # -*- Python -*-
import os
import platform
import sys
import lit.formats
config.name = "Extra Tools Unit Tests"
config.suffixes = [] # Seems not to matter for google tests?
# Test Source and Exec root dirs both point to the same directory where google
# test binaries are built.
config.test_source_root = config.extra_tools_obj_dir
config.test_exec_root = config.test_source_root
# All GoogleTests are named to have 'Tests' as their suffix. The '.' option is
# a special value for GoogleTest indicating that it should look through the
# entire testsuite recursively for tests (alternatively, one could provide a
# ;-separated list of subdirectories).
config.test_format = lit.formats.GoogleTest('.', 'Tests')
if platform.system() == 'Darwin':
shlibpath_var = 'DYLD_LIBRARY_PATH'
elif platform.system() == 'Windows':
shlibpath_var = 'PATH'
else:
shlibpath_var = 'LD_LIBRARY_PATH'
# Point the dynamic loader at dynamic libraries in 'lib'.
shlibpath = os.path.pathsep.join((config.shlibdir, config.llvm_libs_dir,
config.environment.get(shlibpath_var,'')))
# Win32 seeks DLLs along %PATH%.
if sys.platform in ['win32', 'cygwin'] and os.path.isdir(config.shlibdir):
shlibpath = os.path.pathsep.join((config.shlibdir, shlibpath))
config.environment[shlibpath_var] = shlibpath
|
py | b40e1dab677f7cc439a6b843716a5183613cec38 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('addons', '0009_auto_20160305_1212'),
]
operations = [
migrations.AddField(
model_name='addon',
name='volume_ids',
field=models.TextField(verbose_name='OpenStack Volume Ids', blank=True),
),
migrations.AddField(
model_name='addon',
name='volume_size',
field=models.IntegerField(default=16, verbose_name='OpenStack Volume Size'),
preserve_default=False,
),
]
|
py | b40e1e099bb143831229710c623c28b55ebe0b58 | # This file is generated by objective.metadata
#
# Last update: Sun Mar 22 17:12:49 2020
#
# flake8: noqa
import objc, sys
if sys.maxsize > 2 ** 32:
def sel32or64(a, b):
return b
else:
def sel32or64(a, b):
return a
misc = {}
constants = """$SLServiceTypeFacebook$SLServiceTypeLinkedIn$SLServiceTypeSinaWeibo$SLServiceTypeTencentWeibo$SLServiceTypeTwitter$"""
enums = """$SLRequestMethodDELETE@2$SLRequestMethodGET@0$SLRequestMethodPOST@1$SLRequestMethodPUT@3$"""
misc.update({})
r = objc.registerMetaDataForSelector
objc._updatingMetadata(True)
try:
r(b"SLComposeServiceViewController", b"isContentValid", {"retval": {"type": b"Z"}})
r(
b"SLRequest",
b"performRequestWithHandler:",
{
"arguments": {
2: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": b"^v"},
1: {"type": b"@"},
2: {"type": b"@"},
3: {"type": b"@"},
},
}
}
}
},
)
finally:
objc._updatingMetadata(False)
expressions = {}
# END OF FILE
|
py | b40e1e25d5e753c06112795326cf06edd3aa3d5e | """A wrapper for the sponsorblock API."""
from .client import Client
from .errors import (
BadRequest,
DuplicateException,
Forbidden,
HTTPException,
InvalidJSONException,
NotFoundException,
RateLimitException,
ServerException,
UnexpectedException,
)
from .models import Segment, User, TopUser, TotalStats, SegmentInfo, SearchedUser
from .utils import SortType
__name__ = "sponsorblock.py" # noqa
__version__ = "0.2.0"
|
py | b40e1e7b47339e763cb2af91ec1207964ef989f9 | from setuptools import setup
setup(
name='SuckControl',
version='1.1.1',
    install_requires=[  # third-party dependencies, not local packages
        'cli-ui',
        'numpy',
        'pythonnet'
    ],
url='https://github.com/Nama/SuckControl/',
license='MIT',
author='Yama',
description='Automatic speed control of any fan depending on any temperature sensor.'
)
|
py | b40e1ea0246e265e86b76d3ad07b0738a7a151d5 | """
Contains the definition of ELBO objective used to train the GQN model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import List
import tensorflow as tf
TfTensorList = List[tf.Tensor]
def gqn_draw_elbo(
mu_target: tf.Tensor, sigma_target: tf.Tensor,
mu_q: TfTensorList, sigma_q: TfTensorList,
mu_pi: TfTensorList, sigma_pi: TfTensorList,
target_frame: tf.Tensor,
scope='GQN_DRAW_ELBO'):
"""
Defines the ELBO of the GQN graph.
Arguments:
mu_target: The mean parameterizing the final image sampling.
sigma_target: The sigma parameterizing the final image sampling.
mu_q: A list of mus parameterizing the posterior for every image generation step.
sigma_q: A list of sigmas parameterizing the posterior for every image generation step.
mu_pi: A list of mus parameterizing the prior for every image generation step.
sigma_pi: A list of sigmas parameterizing the prior for every image generation step.
target_frame: The ground truth target frame to produce (i.e. the 'label').
scope: The variable scope name of the objective graph.
Returns:
elbo: Scalar. Expected value over the negative log-likelihood of the target frame given \
the target distribution regularized by the cumulative KL divergence between posterior \
and prior distributions at every image generation step.
endpoints: A dictionary of relevant computational endpoints for quick access to the graph \
nodes.
"""
with tf.variable_scope(scope):
endpoints = {}
# negative log-likelihood of target frame given target distribution
target_normal = tf.distributions.Normal(loc=mu_target, scale=sigma_target)
target_llh = tf.identity(
input=-tf.reduce_sum(
tf.reduce_mean(target_normal.log_prob(target_frame), axis=0)),
name='target_llh')
endpoints['target_llh'] = target_llh
# KL divergence regularizer over all generation steps
kl_div_list = []
for mu_q_l, sigma_q_l, mu_pi_l, sigma_pi_l in zip(mu_q, sigma_q, mu_pi, sigma_pi):
posterior_normal_l = tf.distributions.Normal(loc=mu_q_l, scale=sigma_q_l)
prior_normal_l = tf.distributions.Normal(loc=mu_pi_l, scale=sigma_pi_l)
kl_div_l = tf.distributions.kl_divergence(posterior_normal_l, prior_normal_l)
# kl_div_l = tf.Print(
# input_=kl_div_l,
# data=[tf.reduce_sum(tf.cast(tf.is_nan(kl_div_l), tf.float32))]
# ) # debug
kl_div_list.append(kl_div_l)
kl_regularizer = tf.identity(
input=tf.reduce_sum(
tf.reduce_mean(tf.add_n(kl_div_list), axis=0)),
name='kl_regularizer')
endpoints['kl_regularizer'] = kl_regularizer
# final ELBO term
# target_llh = tf.Print(input_=target_llh, data=[target_llh]) # debug
# kl_regularizer = tf.Print(input_=kl_regularizer, data=[kl_regularizer]) # debug
elbo = target_llh + kl_regularizer
return elbo, endpoints
def gqn_vae_elbo(
mu_target: tf.Tensor, sigma_target: tf.Tensor,
mu_q: tf.Tensor, sigma_q: tf.Tensor,
target_frame: tf.Tensor,
scope='GQN_VAE_ELBO'):
"""
Defines the ELBO of the GQN-VAE baseline graph.
Arguments:
mu_target: The mean parameterizing the final image sampling.
sigma_target: The sigma parameterizing the final image sampling.
mu_q: The mean parameterizing the posterior for image generation.
sigma_q: The sigma parameterizing the posterior for image generation.
target_frame: The ground truth target frame to produce (i.e. the 'label').
scope: The variable scope name of the objective graph.
Returns:
elbo: Scalar. Expected value over the negative log-likelihood of the target frame given \
the target distribution regularized by the cumulative KL divergence between posterior \
and prior distributions for image generation.
"""
with tf.variable_scope(scope):
# negative log-likelihood of target frame given target distribution
target_normal = tf.distributions.Normal(loc=mu_target, scale=sigma_target)
target_llh = tf.identity(
input=-tf.reduce_sum(
tf.reduce_mean(target_normal.log_prob(target_frame), axis=0)),
name='target_llh')
# KL divergence regularizer
posterior_normal = tf.distributions.Normal(loc=mu_q, scale=sigma_q)
prior_normal = tf.distributions.Normal(loc=tf.zeros_like(mu_q),
scale=tf.ones_like(sigma_q))
kl_div = tf.distributions.kl_divergence(posterior_normal, prior_normal)
kl_regularizer = tf.identity(
input=tf.reduce_sum(
                tf.reduce_mean(kl_div, axis=0)),  # single KL tensor here (no list to add_n)
name='kl_regularizer')
# final ELBO term
elbo = target_llh + kl_regularizer
return elbo
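# Hedged smoke-test sketch (not part of the original module): wires the DRAW ELBO up
# with dummy tensors to show the expected shapes. Batch/frame/latent sizes below are
# illustrative assumptions, not values prescribed by the GQN paper or this file.
if __name__ == '__main__':
    _batch, _height, _width, _channels = 2, 64, 64, 3
    _target = tf.zeros([_batch, _height, _width, _channels])
    _mu_t = tf.zeros([_batch, _height, _width, _channels])
    _sigma_t = tf.ones([_batch, _height, _width, _channels])
    # a single generation step; posterior and prior share the (assumed) latent shape
    _mu_step = [tf.zeros([_batch, 16, 16, 3])]
    _sigma_step = [tf.ones([_batch, 16, 16, 3])]
    _elbo, _endpoints = gqn_draw_elbo(
        _mu_t, _sigma_t, _mu_step, _sigma_step, _mu_step, _sigma_step, _target)
    print(_elbo, _endpoints)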
|
py | b40e1f3318be95ee43dc1ddd8efc22a9ff229466 | from cytoolz import curry
from eth_utils import decode_hex
from evm.exceptions import (
ValidationError,
)
@curry
def new_transaction(
vm,
from_,
to,
amount=0,
private_key=None,
gas_price=10,
gas=100000,
data=b''):
"""
Create and return a transaction sending amount from <from_> to <to>.
The transaction will be signed with the given private key.
"""
nonce = vm.state.account_db.get_nonce(from_)
tx = vm.create_unsigned_transaction(
nonce=nonce, gas_price=gas_price, gas=gas, to=to, value=amount, data=data)
if private_key:
return tx.as_signed_transaction(private_key, chain_id=1)
else:
return tx
def fill_block(chain, from_, key, gas, data):
recipient = decode_hex('0xa94f5374fce5edbc8e2a8697c15331677e6ebf0c')
amount = 100
vm = chain.get_vm()
assert vm.state.gas_used == 0
while True:
tx = new_transaction(chain.get_vm(), from_, recipient, amount, key, gas=gas, data=data)
try:
chain.apply_transaction(tx)
except ValidationError as exc:
if "Transaction exceeds gas limit" == str(exc):
break
else:
raise exc
assert chain.get_vm().state.gas_used > 0
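# Hedged usage sketch (not part of the original fixtures): assumes `chain` is an
# already-built py-evm chain and `funded_address`/`funded_key` name a funded account
# and its private key (both placeholders).
#
#   tx = new_transaction(chain.get_vm(), funded_address, recipient,
#                        amount=10, private_key=funded_key)
#   chain.apply_transaction(tx)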
|
py | b40e1fb8627f1627843d9ea0dadb2e03067334ae | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from aldryn_apphooks_config.app_base import CMSConfigApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
from .models import NewsBlogConfig
class NewsBlogApp(CMSConfigApp):
name = _('NewsBlog')
app_name = 'aldryn_newsblog'
app_config = NewsBlogConfig
urls = ['aldryn_newsblog.urls'] # COMPAT: CMS3.2
def get_urls(self, *args, **kwargs):
return self.urls
    # NOTE: Please do not add a «menu» here; menus should only be added at
    # the discretion of the operator.
apphook_pool.register(NewsBlogApp)
|
py | b40e1fccd92615a656ce335555788f35c2d46c29 | from .base import BaseConfig
class ProductionConfig(BaseConfig):
"""
    Production environment configuration.
"""
pass
|
py | b40e200cc5e512d43c5d3445b02bcbcec6594714 | from __future__ import print_function
import re
import pymol
cmd = __import__("sys").modules["pymol.cmd"]
from . import setting
from . import parsing
import threading
from .cmd import DEFAULT_ERROR, DEFAULT_SUCCESS, _raising, \
is_list, is_string, is_error
QuietException = parsing.QuietException
_prefix = "_tmp_editor"
tmp_wild = _prefix + "*"
tmp_editor = _prefix + "0"
tmp_connect = _prefix + "_con"
tmp_domain = _prefix + "_dom"
tmp1 = _prefix + "1"
tmp2 = _prefix + "2"
tmp3 = _prefix + "3"
tmp4 = _prefix + "4"
# routines to assist in molecular editing
class undocontext:
def __init__(self, cmd, sele):
# not implemented in open-source
pass
def __enter__(self):
# not implemented in open-source
pass
def __exit__(self, exc_type, exc_value, traceback):
# not implemented in open-source
pass
def attach_fragment(selection,fragment,hydrogen,anchor,_self=cmd):
'''
ARGUMENTS
selection = str: must be "pk1"
fragment = str: fragment name to load from fragment library
hydrogen = int: hydrogen atom ID in fragment to fuse
anchor = int: none-hydrogen atom ID in fragment to fuse
'''
if selection not in _self.get_names("selections"):
if fragment in _self.get_names("objects"):
print(" Error: an object with than name already exists")
raise QuietException
else:
_self.fragment(fragment)
if _self.get_setting_boolean("auto_remove_hydrogens"):
_self.remove("(hydro and %s)"%fragment)
else:
_self.fragment(fragment,tmp_editor, origin=0)
if _self.count_atoms("((%s) and elem H)"%selection,quiet=1):
_self.fuse("(%s and id %d)"%(tmp_editor,hydrogen),"(pk1)",1)
if _self.get_setting_boolean("auto_remove_hydrogens"):
_self.remove("(hydro and pkmol)")
else:
_self.remove("(%s and id %d)"%(tmp_editor,hydrogen))
_self.fuse("(%s and id %d)"%(tmp_editor,anchor),"(pk1)",1)
if _self.get_setting_boolean("auto_remove_hydrogens"):
_self.remove("(hydro and pkmol)")
elif _self.count_atoms('hydro and (neighbor pk2)'):
_self.h_fill()
_self.delete(tmp_editor)
def combine_fragment(selection,fragment,hydrogen,anchor,_self=cmd):
with undocontext(_self, selection):
_self.fragment(fragment,tmp_editor)
try:
if _self.get_setting_boolean("auto_remove_hydrogens"):
_self.remove("(hydro and ?%s)" % tmp_editor)
_self.fuse("?%s" % tmp_editor, "(%s)" % selection, 3)
finally:
_self.delete(tmp_editor)
def attach_amino_acid(selection,amino_acid,center=0,animate=-1,object="",hydro=-1,ss=-1,_self=cmd):
'''
ARGUMENTS
selection = str: named selection of single N or C atom
amino_acid = str: fragment name to load from fragment library
center = bool: center on new terminus (pk1)
animate = int: animate centering
object = str: name of new object (if selection is none)
hydro = int (-1/0/1): keep hydrogens
ss = int: Secondary structure 1=alpha helix, 2=antiparallel beta, 3=parallel beta, 4=flat
'''
r = DEFAULT_SUCCESS
ss = int(ss)
center = int(center)
if hydro<0:
hydro = not int(_self.get_setting_boolean("auto_remove_hydrogens"))
if (selection not in _self.get_names('all')
if selection == 'pk1' # legacy, calling functions should pass '?pk1'
else _self.count_atoms(selection) == 0):
if object == "":
object = amino_acid
# create new object
if amino_acid in _self.get_names("objects"):
print("Error: an object with than name already exists")
raise QuietException
r = _self.fragment(amino_acid,object)
if not hydro:
_self.remove("(hydro and %s)"%object)
if _self.count_atoms("((%s) and name C)"%object):
_self.edit("((%s) and name C)"%object)
elif _self.count_atoms("((%s) and name N)"%object):
_self.edit("((%s) and name N)"%object)
elif _self.select(tmp_connect,"(%s) & elem N,C"%selection) != 1:
print("Error: invalid connection point: must be one atom, name N or C.")
_self.delete(tmp_wild)
raise QuietException
elif amino_acid in ["nhh","nme"] and _self.select(tmp_connect,"(%s) & elem C"%selection) != 1:
print("Error: invalid connection point: must be C for residue '%s'"%(amino_acid))
_self.delete(tmp_wild)
raise QuietException
elif amino_acid in ["ace"] and _self.select(tmp_connect,"(%s) & elem N"%selection) != 1:
print("Error: invalid connection point: must be N for residue '%s'"%(amino_acid))
_self.delete(tmp_wild)
raise QuietException
else:
if ss<0:
ss = _self.get_setting_int("secondary_structure")
if ss:
if ss==1: # helix
phi=-57.0
psi=-47.0
elif ss==2: # antipara-beta
phi=-139.0
psi=135.0
elif ss==3: # para-beta
phi=-119.0
psi=113.0
else:
phi=180.0
psi=180.0
_self.fragment(amino_acid,tmp_editor, origin=0)
if _self.count_atoms("elem N",domain=tmp_connect):
tmp = [ None ]
_self.iterate(tmp_connect,"tmp[0]=resv", space={ 'tmp' : tmp })
tmp[0] = str(tmp[0]-1) # counting down
_self.alter(tmp_editor,"resi=tmp[0]",space={ 'tmp' : tmp})
_self.set_geometry(tmp_connect, 3, 3) # make nitrogen planar
_self.fuse("(%s and name C)"%(tmp_editor),tmp_connect,2)
_self.select(tmp_domain, "byresi (pk1 | pk2)")
if not hydro:
_self.remove("(pkmol and hydro)")
if ((_self.select(tmp1,"?pk1",domain=tmp_domain)==1) and
(_self.select(tmp2,"?pk2",domain=tmp_domain)==1)):
if ((_self.select(tmp3,"(name CA,CH3 & nbr. ?pk1)",domain=tmp_domain)==1) and
(_self.select(tmp4,"(name CA,CH3 & nbr. ?pk2)",domain=tmp_domain)==1)):
_self.set_dihedral(tmp4,tmp2,tmp1,tmp3,180.0)
if hydro:
_self.h_fix(tmp2) # fix hydrogen position
if ss:
if amino_acid[0:3]!='pro':
if ((_self.select(tmp4,
"(!(resn PRO) & name C & nbr. (name CA & nbr. "+tmp2+"))",
domain=tmp_domain)==1) and
(_self.select(tmp3,
"(!(resn PRO) & name CA & nbr. "+tmp2+")",
domain=tmp_domain)==1)):
_self.set_dihedral( # PHI
tmp4, # C
tmp3, # CA
tmp2, # N
tmp1, # C
phi)
if ((_self.select(tmp4,"(name N & nbr. (name CA & nbr. "+tmp1+"))",
domain=tmp_domain)==1) and
(_self.select(tmp3,"(name CA & nbr. "+tmp1+")",domain=tmp_domain)==1)):
_self.set_dihedral( # PSI (n-1)
tmp2, # N
tmp1, # C
tmp3, # CA
tmp4, # N
psi)
sele = ("(name N & (byres nbr. %s) &! (byres %s))"% (tmp_connect,tmp_connect))
if _self.select(tmp1,sele,domain=tmp_domain):
_self.edit(tmp1)
if center:
_self.center(tmp1,animate=animate)
elif _self.count_atoms("elem C",domain=tmp_connect): # forward
tmp = [ None ]
_self.iterate(tmp_connect,"tmp[0]=resv", space={ 'tmp' : tmp })
tmp[0] = str(tmp[0]+1) # counting up
_self.alter(tmp_editor,"resi=tmp[0]",space={ 'tmp' : tmp})
_self.set_geometry(tmp_editor + " & name N", 3, 3) # make nitrogen planar
_self.fuse("(%s and name N)"%tmp_editor,tmp_connect,2)
_self.select(tmp_domain, "byresi (pk1 | pk2)")
if not hydro:
_self.remove("(pkmol and hydro)")
if (( _self.select(tmp1,"?pk1",domain=tmp_domain)==1) and
( _self.select(tmp2,"?pk2",domain=tmp_domain)==1)):
if ((_self.select(tmp3,"(name CA,CH3 & nbr. ?pk1)",domain=tmp_domain)==1) and
(_self.select(tmp4,"(name CA,CH3 & nbr. ?pk2)",domain=tmp_domain)==1)):
_self.set_dihedral(tmp4,tmp2,tmp1,tmp3,180.0)
if hydro:
_self.h_fix("pk1") # fix hydrogen position
if ss:
if hydro and amino_acid[0:3]=='nhh': # fix amide hydrogens
if ((_self.select(tmp3,"(name H1 & nbr. "+tmp1+")",domain=tmp_domain)==1) and
(_self.select(tmp4,"(name O & nbr. "+tmp2+")",domain=tmp_domain)==1)):
_self.set_dihedral(
tmp4, # O
tmp2, # C
tmp1, # N
tmp3, # H1
180)
if amino_acid[0:3]!='pro':
if ((_self.select(tmp3,"(name CA & nbr. "+tmp1+")",domain=tmp_domain)==1) and
(_self.select(tmp4,"(name C & nbr. (name CA & nbr. "+tmp1+"))",domain=tmp_domain)==1)):
_self.set_dihedral( # PHI
tmp2, # C
tmp1, # N
tmp3, # CA
tmp4, # C
phi)
if ((_self.select(tmp3,"(name CA & nbr. "+tmp2+")",domain=tmp_domain)==1) and
(_self.select(tmp4,"(name N & nbr. (name CA & nbr. "+tmp2+"))",domain=tmp_domain)==1)):
_self.set_dihedral( # PSI (n-1)
tmp4, # N
tmp3, # CA
tmp2, # C
tmp1, # N
psi)
sele = ("(name C & (byres nbr. %s) & !(byres %s))"% (tmp_connect,tmp_connect))
if _self.select(tmp1,sele,domain=tmp_domain):
_self.edit(tmp1)
if center:
_self.center(tmp1,animate=animate)
else:
_self.unpick()
elif _self.count_atoms("((%s) and elem H)"%selection):
print("Error: please pick a nitrogen or carbonyl carbon to grow from.")
_self.delete(tmp_wild)
raise QuietException
else:
print("Error: unable to attach fragment.")
_self.delete(tmp_wild)
raise QuietException
_self.delete(tmp_wild)
return r
_aa_codes = {
'A' : 'ala',
'B' : 'ace',
'C' : 'cys',
'D' : 'asp',
'E' : 'glu',
'F' : 'phe',
'G' : 'gly',
'H' : 'his',
'I' : 'ile',
'K' : 'lys',
'L' : 'leu',
'M' : 'met',
'N' : 'asn',
'P' : 'pro',
'Q' : 'gln',
'R' : 'arg',
'S' : 'ser',
'T' : 'thr',
'V' : 'val',
'W' : 'trp',
'Y' : 'tyr',
'Z' : 'nme',
}
_fab_codes = {
'peptide' : _aa_codes,
}
_pure_number = re.compile("[0-9]+")
def _fab(input,name,mode,resi,chain,segi,state,dir,hydro,ss,quiet,_self=cmd):
r = DEFAULT_ERROR
code = _fab_codes.get(mode,None)
quiet = int(quiet)
resi = int(resi)
state = int(state)
dir = int(dir)
hydro = int(hydro)
if hydro < 0:
hydro = not _self.get_setting_boolean("auto_remove_hydrogens")
seq_len = 0
if (mode == 'peptide') and is_string(input):
# '123/ ADC B/234/ AFCD' to [ '123/','A','D','C','B/234/','F','C','D' ]
frags = input.split()
input = []
for frag in frags:
if '/' in frag:
input.append(frag)
else:
seq_len = seq_len + len(frag)
input.extend(list(frag))
input.append("/") # breaks chain
if name is None:
name = _self.get_unused_name("obj")
elif name in _self.get_names():
_self.delete(name)
if mode in [ 'peptide' ]: # polymers
if (seq_len>99) and not quiet:
print(" Generating a %d residue peptide from sequence..."%seq_len)
input.reverse()
sequence = input
if code is not None:
while len(sequence):
while len(sequence) and '/' in sequence[-1]:
part = sequence.pop().split('/')
if len(part)>1:
if len(part[-2]):
resi = int(part[-2])
if len(part)>2:
chain = part[-3]
if len(part)>3:
segi = part[-4]
if len(sequence) and not _self.count_atoms("?pk1"): # new polymer segment
tmp_obj = _self.get_unused_name()
first = sequence.pop()
_self.fragment(code[first], tmp_obj)
if not hydro:
_self.remove(tmp_obj + ' and hydro')
_self.alter(tmp_obj,'resi="""%s""";chain="""%s""";segi="""%s"""'%(resi,chain,segi))
_self.create(name,tmp_obj+" or ?"+name,1,1,zoom=0)
tmp_sel = _self.get_unused_name()
if mode == 'peptide':
if dir>0:
_self.select(tmp_sel,"name C and "+tmp_obj)
resi = resi + 1
else:
_self.select(tmp_sel,"name N and "+tmp_obj)
resi = resi - 1
_self.edit(name+" in "+tmp_sel) # set the editor's pk1 selection
_self.delete(tmp_sel+" "+tmp_obj)
if mode == 'peptide':
while len(sequence):
if '/' in sequence[-1]:
_self.unpick() # break chain at this point
break
if not _self.count_atoms("?pk1"):
break
else:
attach_amino_acid("pk1",code[sequence.pop()],animate=0,ss=ss,hydro=hydro,_self=_self)
if dir>0:
resi = resi + 1
else:
resi = resi - 1
if not len(sequence):
r = DEFAULT_SUCCESS
if _self.get_setting_int('auto_zoom'):
_self.zoom(name)
return r
def fab(input,name=None,mode='peptide',resi=1,chain='',segi='',state=-1,
dir=1,hydro=-1,ss=0,async_=0,quiet=1,_self=cmd, **kwargs):
'''
DESCRIPTION
Build a peptide
ARGUMENTS
input = str: sequence in one-letter code
name = str: name of object to create {default: }
ss = int: Secondary structure 1=alpha helix, 2=antiparallel beta, 3=parallel beta, 4=flat
EXAMPLE
fab ACDEFGH
fab ACDEFGH, helix, ss=1
'''
async_ = int(kwargs.pop('async', async_))
if kwargs:
raise pymol.CmdException('unknown argument: ' + ', '.join(kwargs))
if async_ < 1:
r = _fab(input,name,mode,resi,chain,segi,
state,dir,hydro,ss,quiet,_self)
else:
fab_thread = threading.Thread(target=_fab, args=(input,name,mode,
resi,chain,
segi,state,dir,
hydro,ss,quiet,_self))
fab_thread.setDaemon(1)
fab_thread.start()
r = DEFAULT_SUCCESS
return r
def build_peptide(sequence,_self=cmd): # legacy
for aa in sequence:
attach_amino_acid("pk1",_aa_codes[aa])
|
py | b40e200e676425a22e383f2d3d22b795d2588331 | # Copyright 2017 Yahoo Inc.
# Licensed under the terms of the Apache 2.0 license.
# Please see LICENSE file in the project root for terms.
# Distributed MNIST on grid based on TensorFlow MNIST example
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def print_log(worker_num, arg):
print("{0}: {1}".format(worker_num, arg))
def main_tf(args, ctx):
from datetime import datetime
import math
import numpy
import tensorflow as tf
import time
worker_num = ctx.worker_num
job_name = ctx.job_name
task_index = ctx.task_index
# Parameters
IMAGE_PIXELS = 28
hidden_units = 128
# Get TF cluster and server instances
cluster, server = ctx.start_cluster_server(1, args.rdma)
# Create generator for Spark data feed
tf_feed = ctx.get_data_feed(args.mode == 'train')
def rdd_generator():
while not tf_feed.should_stop():
batch = tf_feed.next_batch(1)
if len(batch) == 0:
return
row = batch[0]
image = numpy.array(row[0]).astype(numpy.float32) / 255.0
      label = numpy.array(row[1]).astype(numpy.float32)  # one-hot label; dtype must match the tf.float32 declared in Dataset.from_generator below
yield (image, label)
if job_name == "ps":
server.join()
elif job_name == "worker":
# Assigns ops to the local worker by default.
with tf.device(tf.train.replica_device_setter(
worker_device="/job:worker/task:%d" % task_index,
cluster=cluster)):
# Dataset for input data
ds = tf.data.Dataset.from_generator(rdd_generator, (tf.float32, tf.float32), (tf.TensorShape([IMAGE_PIXELS * IMAGE_PIXELS]), tf.TensorShape([10]))).batch(args.batch_size)
iterator = ds.make_one_shot_iterator()
x, y_ = iterator.get_next()
# Variables of the hidden layer
hid_w = tf.Variable(tf.truncated_normal([IMAGE_PIXELS * IMAGE_PIXELS, hidden_units],
stddev=1.0 / IMAGE_PIXELS), name="hid_w")
hid_b = tf.Variable(tf.zeros([hidden_units]), name="hid_b")
tf.summary.histogram("hidden_weights", hid_w)
# Variables of the softmax layer
sm_w = tf.Variable(tf.truncated_normal([hidden_units, 10],
stddev=1.0 / math.sqrt(hidden_units)), name="sm_w")
sm_b = tf.Variable(tf.zeros([10]), name="sm_b")
tf.summary.histogram("softmax_weights", sm_w)
x_img = tf.reshape(x, [-1, IMAGE_PIXELS, IMAGE_PIXELS, 1])
tf.summary.image("x_img", x_img)
hid_lin = tf.nn.xw_plus_b(x, hid_w, hid_b)
hid = tf.nn.relu(hid_lin)
y = tf.nn.softmax(tf.nn.xw_plus_b(hid, sm_w, sm_b))
global_step = tf.train.get_or_create_global_step()
loss = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
tf.summary.scalar("loss", loss)
train_op = tf.train.AdagradOptimizer(0.01).minimize(
loss, global_step=global_step)
# Test trained model
label = tf.argmax(y_, 1, name="label")
prediction = tf.argmax(y, 1, name="prediction")
correct_prediction = tf.equal(prediction, label)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="accuracy")
tf.summary.scalar("acc", accuracy)
saver = tf.train.Saver()
summary_op = tf.summary.merge_all()
init_op = tf.global_variables_initializer()
# Create a "supervisor", which oversees the training process and stores model state into HDFS
logdir = ctx.absolute_path(args.model)
print("tensorflow model path: {0}".format(logdir))
summary_writer = tf.summary.FileWriter("tensorboard_%d" % worker_num, graph=tf.get_default_graph())
hooks = [tf.train.StopAtStepHook(last_step=args.steps)] if args.mode == "train" else []
with tf.train.MonitoredTrainingSession(master=server.target,
is_chief=(task_index == 0),
scaffold=tf.train.Scaffold(init_op=init_op, summary_op=summary_op, saver=saver),
checkpoint_dir=logdir,
hooks=hooks) as sess:
print("{} session ready".format(datetime.now().isoformat()))
# Loop until the session shuts down or feed has no more data
step = 0
while not sess.should_stop() and not tf_feed.should_stop():
# Run a training step asynchronously.
# See `tf.train.SyncReplicasOptimizer` for additional details on how to
# perform *synchronous* training.
if args.mode == "train":
_, summary, step = sess.run([train_op, summary_op, global_step])
if (step % 100 == 0):
print("{} step: {} accuracy: {}".format(datetime.now().isoformat(), step, sess.run(accuracy)))
if task_index == 0:
summary_writer.add_summary(summary, step)
else: # args.mode == "inference"
labels, preds, acc = sess.run([label, prediction, accuracy])
results = ["{} Label: {}, Prediction: {}".format(datetime.now().isoformat(), l, p) for l, p in zip(labels, preds)]
tf_feed.batch_results(results)
print("acc: {}".format(acc))
print("{} stopping MonitoredTrainingSession".format(datetime.now().isoformat()))
# WORKAROUND FOR https://github.com/tensorflow/tensorflow/issues/21745
# wait for all other nodes to complete (via done files)
done_dir = "{}/{}/done".format(ctx.absolute_path(args.model), args.mode)
print("Writing done file to: {}".format(done_dir))
tf.gfile.MakeDirs(done_dir)
with tf.gfile.GFile("{}/{}".format(done_dir, ctx.task_index), 'w') as done_file:
done_file.write("done")
for i in range(60):
if len(tf.gfile.ListDirectory(done_dir)) < len(ctx.cluster_spec['worker']):
print("{} Waiting for other nodes {}".format(datetime.now().isoformat(), i))
time.sleep(1)
else:
print("{} All nodes done".format(datetime.now().isoformat()))
break
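# Minimal launch sketch from the Spark driver (assumption: TensorFlowOnSpark is
# installed and 'sc', 'args', and the RDD below are defined by the caller; the
# names are illustrative only):
#
#   from tensorflowonspark import TFCluster
#   cluster = TFCluster.run(sc, main_tf, args, args.cluster_size, num_ps=1,
#                           tensorboard=False, input_mode=TFCluster.InputMode.SPARK)
#   cluster.train(images_labels_rdd, args.epochs)
#   cluster.shutdown()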
|
py | b40e2092c89878431092defe2e413bfb651ae6c5 | """Authentication helper functions
"""
import os
from flask import request
from .jwt import jwt_manager
from flask_jwt_extended import jwt_required, get_jwt_identity
# Accept user name passed in Basic Auth header
# (password has to be checked beforehand!)
ALLOW_BASIC_AUTH_USER = os.environ.get('ALLOW_BASIC_AUTH_USER', 'False') \
.lower() in ('t', 'true')
def auth_manager(app, api=None):
"""Authentication setup for Flask app"""
# Setup the Flask-JWT-Extended extension
return jwt_manager(app, api)
def optional_auth(fn):
"""Authentication view decorator"""
return jwt_required(optional=True)(fn)
def get_identity():
"""Get identity (username oder dict with username and groups)"""
return get_jwt_identity()
def get_username(identity):
"""Get username"""
if identity:
if isinstance(identity, dict):
username = identity.get('username')
else:
# identity is username
username = identity
else:
username = None
return username
def get_groups(identity):
"""Get user groups"""
groups = []
if identity:
if isinstance(identity, dict):
groups = identity.get('groups', [])
group = identity.get('group')
if group:
groups.append(group)
return groups
def get_auth_user():
"""Get identity or optional pre-authenticated basic auth user"""
identity = get_identity()
if not identity and ALLOW_BASIC_AUTH_USER:
auth = request.authorization
if auth:
# We don't check password, already authenticated!
identity = auth.username
return identity
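# Minimal wiring sketch (hypothetical Flask app; 'app' and the route below are
# illustrative and not part of this module):
#
#   from flask import Flask, jsonify
#   app = Flask(__name__)
#   jwt = auth_manager(app)
#
#   @app.route('/userinfo')
#   @optional_auth
#   def userinfo():
#       identity = get_auth_user()
#       return jsonify(username=get_username(identity), groups=get_groups(identity))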
|
py | b40e21148e8e7efa021d4e8d56d214d691db6a0f | import math
import statistics
import warnings
import numpy as np
from hmmlearn.hmm import GaussianHMM
from sklearn.model_selection import KFold
from asl_utils import combine_sequences
class ModelSelector(object):
'''
base class for model selection (strategy design pattern)
'''
def __init__(self, all_word_sequences: dict, all_word_Xlengths: dict, this_word: str,
n_constant=3,
min_n_components=2, max_n_components=10,
random_state=14, verbose=False):
self.words = all_word_sequences
self.hwords = all_word_Xlengths
self.sequences = all_word_sequences[this_word]
self.X, self.lengths = all_word_Xlengths[this_word]
self.this_word = this_word
self.n_constant = n_constant
self.min_n_components = min_n_components
self.max_n_components = max_n_components
self.random_state = random_state
self.verbose = verbose
def select(self):
raise NotImplementedError
def base_model(self, num_states):
# with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
# warnings.filterwarnings("ignore", category=RuntimeWarning)
try:
hmm_model = GaussianHMM(n_components=num_states, covariance_type="diag", n_iter=1000,
random_state=self.random_state, verbose=False).fit(self.X, self.lengths)
if self.verbose:
print("model created for {} with {} states".format(self.this_word, num_states))
return hmm_model
except:
if self.verbose:
print("failure on {} with {} states".format(self.this_word, num_states))
return None
class SelectorConstant(ModelSelector):
""" select the model with value self.n_constant
"""
def select(self):
""" select based on n_constant value
:return: GaussianHMM object
"""
best_num_components = self.n_constant
return self.base_model(best_num_components)
class SelectorBIC(ModelSelector):
""" select the model with the lowest Bayesian Information Criterion(BIC) score
http://www2.imm.dtu.dk/courses/02433/doc/ch6_slides.pdf
Bayesian information criteria: BIC = -2 * logL + p * logN
"""
def select(self):
""" select the best model for self.this_word based on
BIC score for n between self.min_n_components and self.max_n_components
:return: GaussianHMM object
"""
warnings.filterwarnings("ignore", category=DeprecationWarning)
# implement model selection based on BIC scores
# Best model -> with default value as n_constant
best_n = self.n_constant
# Minimize bic score; initialize to +infinity
bic_score = math.inf
try:
for n in range(self.min_n_components, self.max_n_components+1):
# BIC = -2 * logL + p * logN
# logL -> log likelihood of fitted model
# p -> number of parameters
# N -> number of data points
model = self.base_model(n)
logL = model.score(self.X, self.lengths)
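                # Parameter count for a diagonal-covariance GaussianHMM
                # (derivation added here, not stated in the original source):
                #   transitions n*(n-1) + initial probs (n-1) + means n*d + variances n*d
                #   = n**2 + 2*n*d - 1, with d = model.n_features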
p = n**2 + 2*n*model.n_features - 1
                score = -2 * logL + p * math.log(len(self.X))
if score < bic_score:
best_n = n
bic_score = score
except Exception as e:
if self.verbose:
print(e)
return self.base_model(best_n)
class SelectorDIC(ModelSelector):
''' select best model based on Discriminative Information Criterion
Biem, Alain. "A model selection criterion for classification: Application to hmm topology optimization."
Document Analysis and Recognition, 2003. Proceedings. Seventh International Conference on. IEEE, 2003.
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.58.6208&rep=rep1&type=pdf
https://pdfs.semanticscholar.org/ed3d/7c4a5f607201f3848d4c02dd9ba17c791fc2.pdf
DIC = log(P(X(i))) - 1/(M-1) * SUM(log(P(X(all but i))))
'''
def select(self):
warnings.filterwarnings("ignore", category=DeprecationWarning)
# implement model selection based on DIC scores
        # DIC = log(P(X(i))) - 1/(M-1) * SUM(log(P(X(all but i))))
# log(P(X(i))) -> log likelihood score of model on word i
# log(P(X(all but i))) -> log likelihood score of model on all words other than word i
# M -> total words
# list of logL of all words
log_likelihood_all = list()
# best model with default value as n_constant
best_n = self.n_constant
# maximize dic score; initialize to -infinity
dic_score = -math.inf
try:
for n in range(self.min_n_components, self.max_n_components+1):
model = self.base_model(n)
log_likelihood_all.append(model.score(self.X, self.lengths))
M = self.max_n_components - self.min_n_components + 1
logL_sum = sum(log_likelihood_all)
for n in range(self.min_n_components, self.max_n_components+1):
logL = log_likelihood_all[n - self.min_n_components]
logL_others = logL_sum - logL
score = logL - (logL_others / (M-1))
if score > dic_score:
dic_score = score
best_n = n
except Exception as e:
if self.verbose:
print(e)
return self.base_model(best_n)
class SelectorCV(ModelSelector):
''' select best model based on average log Likelihood of cross-validation folds
'''
def select(self):
warnings.filterwarnings("ignore", category=DeprecationWarning)
# implement model selection using CV
mean_scores = []
split_method = KFold()
best_mean_score = -math.inf
best_n = self.n_constant
try:
for n in range(self.min_n_components, self.max_n_components+1):
# Fold and calculate model mean scores
fold_scores = []
for train_idx, test_idx in split_method.split(self.sequences):
# Training sequences
train_X, train_lengths = combine_sequences(train_idx, self.sequences)
# Test sequences
test_X, test_lengths = combine_sequences(test_idx, self.sequences)
# Run model
hmm_model = GaussianHMM(n_components=n, covariance_type="diag", n_iter=1000,
random_state=self.random_state, verbose=False).fit(train_X, train_lengths)
# Record each model score
fold_scores.append(hmm_model.score(test_X, test_lengths))
# Compute mean of all fold scores
mean_score = np.mean(fold_scores)
if mean_score > best_mean_score:
best_mean_score = mean_score
best_n = n
except Exception as e:
if self.verbose:
print(e)
return self.base_model(best_n)
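# Minimal usage sketch (assumption: the ASL recognizer project's training data
# object is available; the word 'BOOK' is illustrative only):
#
#   selector = SelectorBIC(training.get_all_sequences(), training.get_all_Xlengths(),
#                          this_word='BOOK', min_n_components=2, max_n_components=10)
#   model = selector.select()  # GaussianHMM with the best BIC, or the base model on failure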
|
py | b40e2131fc98f952e4a00845011dbebff7a9ed37 | """The Model Configuration file contains configuration for training the model, conducting data study, KMC Generation, Benchmarking and Transfer Learning
Model Training Parameters
:param model_parameters['model_type']: The type of model to be used for training, currently defaults to 3D CNN
:type model_parameters['model_type']: str (required)
:param model_parameters['output_type']: (regression, classification) The output type of the model used to initialize the output layer, currently defaults to regression
:type model_parameters['output_type']: str (required)
:param model_parameters['batch_size']: The batch size while training, can be tuned based on the hardware specifications, currently defaults to 32
:type model_parameters['batch_size']: int (required)
:param model_parameters['epocs']: The number of epochs the model is to be trained for, currently defaults to 150
:type model_parameters['epocs']: int (required)
:param model_parameters['split_ratio']: Split Ratio for train and validation
:type model_parameters['split_ratio']: float (required)
:param model_parameters['optimizer']: The optimizer to be used for model training, refer https://keras.io/optimizers/ for more information, currently defaults to adam
:type model_parameters['optimizer']: keras.optimizer (required)
:param model_parameters['loss_func']: The loss function to be optimized while model training, refer https://keras.io/losses/ for more information, currently defaults to Mean Squared Error (MSE)
:type model_parameters['loss_func']: keras.losses (required)
:param model_parameters['regularizer_coeff']: The regularizing coefficient to be used for L2 norm regularization of the fully connected layer, refer https://keras.io/regularizers/ for more information currently defaults to 0.1
:type model_parameters['regularizer_coeff']: float (required)
:param model_parameters['activate_tensorboard']: Tensorboard activation flag https://www.tensorflow.org/tensorboard, currently set to 0, change to 1 to activate Tensorboard, Warning: There can be some compatibility issues with different Tensorflow and Cuda Toolkit versions
:type model_parameters['activate_tensorboard']: int (required)
Data Study Parameters
:param data_study_params['batch_size']: The batch size while conducting data study, can be tuned based on the hardware specifications, currently defaults to 32
:type data_study_params['batch_size']: int (required)
:param data_study_params['epocs']: The number of epochs the model is to be trained for, currently defaults to 50
:type data_study_params['epocs']: int (required)
:param data_study_params['split_ratio']: Split Ratio for train and validation during data study
:type data_study_params['split_ratio']: float (required)
:param data_study_params['min_train_samples']: Minimum train Samples for data study, currently defaults to 100
:type data_study_params['min_train_samples']: int (required)
:param data_study_params['max_train_samples']: Maximum train samples for data study, dataset size is the maximum value
:type data_study_params['max_train_samples']: int (required)
:param data_study_params['train_increment']: Increment in the train size with each iteration, currently defaults to 100
:type data_study_params['train_increment']: int (required)
Key Measurement Characteristics Generation Parameters
:param kmc_params['tree_based_model']: The model to be used while generating feature importance, refer: https://xgboost.readthedocs.io/en/latest/R-package/discoverYourData.html#measure-feature-importance for more details, currently defaults to xgb, random forests can also be used
:type kmc_params['tree_based_model']: str (required)
:param kmc_params['importance_creteria']: The importance criteria to be used, currently defaults to gini index
:type kmc_params['importance_creteria']: str (required)
:param kmc_params['split_ratio']: Split Ratio for train and validation during data study
:type kmc_params['split_ratio']: float (required)
:param kmc_params['save_model']: Flag to save the model, Currently defaults to 0, change to 1 if model needs to be saved
:type kmc_params['save_model']: int (required)
:param kmc_params['plot_kmc']: Flag to plot the KMC, Currently defaults to 1, change to 0 if no plotting is required
:type kmc_params['plot_kmc']: int (required)
Benchmarking Parameters
:param bm_params['max_models']: The maximum number of models to be used for benchmarking, currently defaults to 10
:type bm_params['max_models']: int (required)
:param bm_params['runs']: Number of benchmarking runs to be conducted
:type bm_params['runs']: int (required)
Transfer Learning Parameters
:param transfer_learning['tl_type']: The type of transfer learning to be used (full_fine_tune, variable_lr, feature_extractor) currently defaults to full_fine_tune
:type transfer_learning['tl_type']: str (required)
:param transfer_learning['tl_base']: The type of base model (3D CNN Architecture) to be used (pointdevnet, voxnet, 3d-UNet), currently defaults to PointdevNet
:type transfer_learning['tl_base']: str (required)
:param transfer_learning['tl_app']: The application of the transfer learning model (classification, regression), currently defaults to regression
:type transfer_learning['tl_app']: str (required)
:param transfer_learning['conv_layer_m']: The learning rate multiplier for convolution layers (only needed when tl_type is variable_lr), defaults to 0.1 (10% of the network Learning Rate)
:type transfer_learning['conv_layer_m']: float (required)
:param transfer_learning['dense_layer_m']: The learning rate multiplier for dense layers (only needed when tl_type is variable_lr), defaults to 1 (100% of the network Learning Rate)
:type transfer_learning['dense_layer_m']: float (required)
"""
model_parameters = {
'model_type':'Bayesian 3D Convolution Neural Network', #other option: '3D Convolution Neural Network'
'learning_type':'Basic', # use 'Transfer Learning' if transfer learning is to be leveraged
'output_type':'regression',
'batch_size': 32,
'epocs':300,
'split_ratio': 0.2,
'optimizer':'adam',
'loss_func':'mse',
'regularizer_coeff': 0.01,
'activate_tensorboard':0
}
cae_sim_params = {
'simulation_platform':'MatLab',
'simulation_engine':'VRM',
'max_run_length':15,
'cae_input_path': 'check',
'cae_output_path':'check',
#case_study parameter imported from assembly_configration
}
encode_decode_params ={
'model_depth':4,
'inital_filter_dim':16,
'kcc_sublist':0,#[0,1,2,3,4,5,6,7,8,9,10,11] use a list in case only a selected sublist of KCCs have to be used: 0 means all KCCs
'output_heads':2
}
data_study_params = {
'batch_size':16,
'epocs':3,
'min_train_samples':400,
'train_increment':200,
'max_train_samples':5000,
'split_ratio':0.2,
'tl_flag':0
}
kmc_params={
'tree_based_model':'xgb',
'importance_creteria':'gini',
'split_ratio':0.2,
'save_model':0,
'plot_kmc':1,
}
bm_params={
'max_models':10,
'runs':3,
'split_ratio': 0.2
}
transfer_learning={
'tl_type':'full_fine_tune', #options 'full_fine_tune', variable_lr', 'feature_extractor'
'tl_base':'model_pointnet_64_halo.h5',
'tl_app':'halo_deploy',
'conv_layer_m':0.1,
'dense_layer_m':1,
} |
py | b40e2189aed30777fe55d2b4674312fcb1743786 | #!/usr/bin/env python3
"""Show text or graphics on a display connected to a Raspberry Pi.
This current version only supports the PiOled (ssd1306 chip on the I2C bus),
with a fallback to using xwindows apps xmessage and feh to display text and
images, respectively."""
# Based on ssd1306_bro_stats, original authors: Tony DiCola & James DeVito
# Copyright (c) 2017 Adafruit Industries
# References:
# https://learn.adafruit.com/adafruit-pioled-128x32-mini-oled-for-raspberry-pi/usage
# https://learn.adafruit.com/pages/15678/elements/3024322/download
# Copyright 2019
# pi_show author: William Stearns <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# This example is for use on (Linux) computers that are using CPython with
# Adafruit Blinka to support CircuitPython libraries. CircuitPython does
# not support PIL/pillow (python imaging library)!
#xmessage: debian/ubuntu package: x11-utils , rpm package: xorg-x11-apps, mac ports package: xmessage
#feh: (all) package: feh
import os
import sys
import time
import subprocess
try:
from board import SCL, SDA
except ImportError:
sys.stderr.write('Unable to load board module; have you run "sudo pip3 install Adafruit-Blinka" ?\n')
#sys.exit(1)
try:
import busio
except ImportError:
sys.stderr.write('Unable to load busio module; have you run "sudo pip3 install Adafruit-Blinka" ?\n')
#sys.exit(1)
try:
from PIL import Image, ImageDraw, ImageFont
except ImportError:
sys.stderr.write('Unable to load PIL module; have you run "sudo apt-get install python3-pil" ?\n')
#sys.exit(1)
#Move into board detect function
try:
import adafruit_ssd1306
except ImportError:
sys.stderr.write('Unable to load adafruit_ssd1306 module; have you run "sudo pip3 install adafruit-circuitpython-ssd1306" ?\n')
#sys.exit(1)
def warn(warn_message):
"""On a non-fatal error, notify user."""
sys.stderr.write(warn_message + '\n')
def debug(debug_message):
"""On a non-fatal error, notify user."""
if cl_args['debug']:
sys.stderr.write(debug_message + '\n')
def fail(fail_message):
"""On a fatal error, notify user and exit."""
sys.stderr.write(fail_message + ' Exiting.\n')
sys.exit(1)
def cmd_output(cmd_to_run):
"""Get the stdout from a command run from bash."""
return subprocess.check_output(cmd_to_run, shell=True).decode("utf-8")
def load_drawing_h(ld_width, ld_height):
"""Initializes the display and Returns the handles needed to access the display. Only called once at the beginning."""
disp_h = None
image_h = None
draw_h = None
if 'busio' in sys.modules and 'board' in sys.modules and 'adafruit_ssd1306' in sys.modules:
# Create the I2C interface.
i2c = busio.I2C(SCL, SDA)
# Create the SSD1306 OLED class.
# The first two parameters are the pixel width and pixel height. Change these
# to the right size for your display!
disp_h = adafruit_ssd1306.SSD1306_I2C(ld_width, ld_height, i2c)
# Clear display.
disp_h.fill(0)
disp_h.show()
if 'PIL' in sys.modules:
# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
image_h = Image.new('1', (ld_width, ld_height))
# Get drawing object to draw on image.
draw_h = ImageDraw.Draw(image_h)
# Draw a black filled box to clear the image.
draw_h.rectangle((0, 0, ld_width, ld_height), outline=0, fill=0)
return disp_h, image_h, draw_h
def load_font_h(font_file):
"""Load a specific font and return the handle. If no font is specified (None), try to load DejaVuSans
and if that fails, load the default font."""
font_h = None
if 'PIL' in sys.modules:
if font_file and os.path.exists(font_file):
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
#Example: '/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf'
font_h = ImageFont.truetype(font_file, 9)
else:
if os.path.exists('/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf'):
#Seriously nicer font than the default for this small display.
font_h = ImageFont.truetype('/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf', 9)
else:
# Load default font.
font_h = ImageFont.load_default()
return font_h
def send_text(display_module_name, disp_h, image_h, draw_h, font_h, text_l, max_lines, max_chars, pad_pixels, display_time):
"""Pushes the line(s) of text in the text_l list to the handles provided.
At most max_lines of text, and at most max_chars per line, are shown."""
if display_module_name == 'xwindows' and xmessage_installed:
xmessage_proc = subprocess.Popen(['xmessage', "\n".join(text_l)])
time.sleep(display_time)
xmessage_proc.terminate()
else:
# Draw a black filled box to clear the image.
draw_h.rectangle((0, 0, disp_h.width, disp_h.height), outline=0, fill=0)
for line_count in range(0, min(max_lines, len(text_l))):
draw_h.text((0, pad_pixels + 8 * line_count), text_l[line_count][0:max_chars], font=font_h, fill=255)
disp_h.image(image_h)
disp_h.show()
try:
time.sleep(display_time)
except KeyboardInterrupt:
sys.exit(0)
def send_image(display_module_name, disp_h, image_file, display_time):
"""Draws the image on the screen. image_file must be a full path to the file."""
if os.path.exists(image_file):
if display_module_name == 'xwindows' and feh_installed:
feh_proc = subprocess.Popen(['feh', image_file])
time.sleep(display_time)
feh_proc.terminate()
else:
image_h = Image.open(image_file).convert('1')
try:
disp_h.image(image_h)
except ValueError:
#Could be that the supplied image has not been converted correctly, so we'll force a conversion
warn(image_file + ' is not in ' + str(disp_h.width) + 'x' + str(disp_h.height) + 'x1 format, converting.')
image_h = Image.open(image_file).resize((disp_h.width, disp_h.height), Image.ANTIALIAS).convert('1')
disp_h.image(image_h)
disp_h.show()
try:
time.sleep(display_time)
except KeyboardInterrupt:
sys.exit(0)
else:
warn(image_file + " unreadable, skipping.")
def sorted_dir_list(requested_start_object):
"""Returns a sorted list of files (and only files) in a directory, with the directory name prepended. Not recursive.
If the requested object is a file, return that."""
justfile_list = []
if os.path.isfile(requested_start_object):
justfile_list.append(requested_start_object)
elif os.path.isdir(requested_start_object):
for one_obj in os.listdir(requested_start_object):
if os.path.isfile(requested_start_object + '/' + one_obj):
justfile_list.append(requested_start_object + '/' + one_obj)
justfile_list.sort()
return justfile_list
def locate_display():
"""Detect the display and return its modules name and dimensions."""
#FIXME - add param for forced display
detected_display = None
x_pixels = 0
y_pixels = 0
#i2c appears to be enabled, check for i2c displays
if os.path.exists('/dev/i2c-1') and os.path.exists('/usr/sbin/i2cdetect') and cmd_output('i2cdetect -y 1 60 60 | grep "^30:.*3c"'):
detected_display = 'ssd1306'
x_pixels = 128
y_pixels = 32 #Are there other boards with id 0x3c with 64 pixels?
#else:
# fail('ssd1306 not detected on i2c bus. Is the PiOled plugged in on the pins closest to the corner on the 40 pin interface?')
elif 'DISPLAY' in os.environ and (feh_installed or xmessage_installed):
detected_display = 'xwindows'
x_pixels = 1024
y_pixels = 768
else:
#FIXME - add reformatted text for what the user needs to do to enable some display
fail("/dev/i2c-1 does not exist. If you are using an I2C display, please run raspi-config, go to Interfacing options or Advanced, enable I2C, reboot, and rerun this script.")
#fail("/usr/sbin/i2cdetect does not exist. Please install i2c-tools with 'sudo apt-get install i2c-tools' .")
return detected_display, x_pixels, y_pixels
pi_show_version = '0.3.5'
default_show_dir = '/var/toshow/'
default_delay = '8'
xmessage_installed = os.path.exists('/opt/X11/bin/xmessage') or os.path.exists('/bin/xmessage') or os.path.exists('/usr/bin/xmessage')
feh_installed = os.path.exists('/usr/bin/feh') or os.path.exists('/bin/feh') or os.path.exists('/opt/local/bin/feh')
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='pi_show version ' + str(pi_show_version))
parser.add_argument('-w', '--wait', help='Time to show a screen before moving to the next (default: ' + str(default_delay) + ' ).', required=False, default=default_delay)
parser.add_argument('-d', '--directory', help='Directory that holds text files/images to be shown (default: ' + str(default_show_dir) + ' ).', required=False, default=default_show_dir)
parser.add_argument('-o', '--once', help='Show each requested object once, then exit (default: continuous loop)', required=False, default=False, action='store_true')
parser.add_argument('-s', '--stdin', help='Show text handed on stdin (lines are only read a single time)', required=False, default=False, action='store_true')
parser.add_argument('-f', '--font', help='Full path to truetype font file (default: DejaVuSans)', required=False, default=None)
parser.add_argument('--debug', help='Show additional debugging information on stderr', required=False, default=False, action='store_true')
cl_args = vars(parser.parse_args())
nap_time = int(cl_args['wait'])
show_dir = str(cl_args['directory'])
(display_module, x_size, y_size) = locate_display()
(disp, image, draw) = load_drawing_h(x_size, y_size)
if cl_args['font'] and not os.path.exists(cl_args['font']):
fail('No such font file')
font = load_font_h(cl_args['font'])
padding = -2
#top = padding
#bottom = disp.height - padding
# Move left to right keeping track of the current x position for drawing shapes.
while True:
if cl_args['stdin']:
#debug('Reading from stdin')
lines = sys.stdin.readlines()
lines = [x.strip() for x in lines]
#debug('About to show: ' + '__'.join(lines))
send_text(display_module, disp, image, draw, font, lines, y_size//8, x_size//6, padding, nap_time)
for full_file in sorted_dir_list(show_dir):
if os.path.exists(full_file): #File may have been deleted since the directory listing was taken, ignore if so.
if full_file.lower().endswith(('.txt')):
with open(full_file) as f:
lines = f.readlines()
lines = [x.strip() for x in lines]
send_text(display_module, disp, image, draw, font, lines, y_size//8, x_size//6, padding, nap_time)
else:
send_image(display_module, disp, full_file, nap_time)
if cl_args['once']:
sys.exit(0)
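# Example invocations (illustrative):
#   ./pi_show.py --directory /var/toshow --wait 5
#   uptime | ./pi_show.py --stdin --once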
|
py | b40e21e8734a829033a9956b1c1c3bf0c2fa58d9 | from decimal import Decimal
import getpass
import datetime
import logging
from electrum_mue import WalletStorage, Wallet
from electrum_mue.util import format_satoshis
from electrum_mue.bitcoin import is_address, COIN, TYPE_ADDRESS
from electrum_mue.transaction import TxOutput
from electrum_mue.network import TxBroadcastError, BestEffortRequestFailed
from electrum_mue.logging import console_stderr_handler
def _(x): return x # i18n
# minimal fdisk like gui for console usage
# written by rofl0r, with some bits stolen from the text gui (ncurses)
class ElectrumGui:
def __init__(self, config, daemon, plugins):
self.config = config
self.network = daemon.network
storage = WalletStorage(config.get_wallet_path())
if not storage.file_exists:
print("Wallet not found. try 'electrum-mue create'")
exit()
if storage.is_encrypted():
password = getpass.getpass('Password:', stream=None)
storage.decrypt(password)
self.done = 0
self.last_balance = ""
console_stderr_handler.setLevel(logging.CRITICAL)
self.str_recipient = ""
self.str_description = ""
self.str_amount = ""
self.str_fee = ""
self.wallet = Wallet(storage)
self.wallet.start_network(self.network)
self.contacts = self.wallet.contacts
self.network.register_callback(
self.on_network, ['wallet_updated', 'network_updated', 'banner'])
self.commands = [_("[h] - displays this help text"),
_("[i] - display transaction history"),
_("[o] - enter payment order"),
_("[p] - print stored payment order"),
_("[s] - send stored payment order"),
_("[r] - show own receipt addresses"),
_("[c] - display contacts"),
_("[b] - print server banner"),
_("[q] - quit")]
self.num_commands = len(self.commands)
def on_network(self, event, *args):
if event in ['wallet_updated', 'network_updated']:
self.updated()
elif event == 'banner':
self.print_banner()
def main_command(self):
self.print_balance()
c = input("enter command: ")
if c == "h":
self.print_commands()
elif c == "i":
self.print_history()
elif c == "o":
self.enter_order()
elif c == "p":
self.print_order()
elif c == "s":
self.send_order()
elif c == "r":
self.print_addresses()
elif c == "c":
self.print_contacts()
elif c == "b":
self.print_banner()
elif c == "n":
self.network_dialog()
elif c == "e":
self.settings_dialog()
elif c == "q":
self.done = 1
else:
self.print_commands()
def updated(self):
s = self.get_balance()
if s != self.last_balance:
print(s)
self.last_balance = s
return True
def print_commands(self):
self.print_list(self.commands, "Available commands")
def print_history(self):
width = [20, 40, 14, 14]
delta = (80 - sum(width) - 4)/3
format_str = "%"+"%d" % width[0]+"s"+"%"+"%d" % (width[1]+delta)+"s"+"%" \
+ "%d" % (width[2]+delta)+"s"+"%"+"%d" % (width[3]+delta)+"s"
messages = []
for tx_hash, tx_mined_status, delta, balance in reversed(self.wallet.get_history()):
if tx_mined_status.conf:
timestamp = tx_mined_status.timestamp
try:
time_str = datetime.datetime.fromtimestamp(
timestamp).isoformat(' ')[:-3]
except Exception:
time_str = "unknown"
else:
time_str = 'unconfirmed'
label = self.wallet.get_label(tx_hash)
messages.append(format_str % (time_str, label, format_satoshis(
delta, whitespaces=True), format_satoshis(balance, whitespaces=True)))
self.print_list(messages[::-1], format_str %
(_("Date"), _("Description"), _("Amount"), _("Balance")))
def print_balance(self):
print(self.get_balance())
def get_balance(self):
if self.wallet.network.is_connected():
if not self.wallet.up_to_date:
msg = _("Synchronizing...")
else:
c, u, x = self.wallet.get_balance()
msg = _("Balance")+": %f " % (Decimal(c) / COIN)
if u:
msg += " [%f unconfirmed]" % (Decimal(u) / COIN)
if x:
msg += " [%f unmatured]" % (Decimal(x) / COIN)
else:
msg = _("Not connected")
return(msg)
def print_contacts(self):
messages = map(lambda x: "%20s %45s " %
(x[0], x[1][1]), self.contacts.items())
self.print_list(messages, "%19s %25s " % ("Key", "Value"))
def print_addresses(self):
messages = map(lambda addr: "%30s %30s " % (
addr, self.wallet.labels.get(addr, "")), self.wallet.get_addresses())
self.print_list(messages, "%19s %25s " % ("Address", "Label"))
def print_order(self):
print("send order to " + self.str_recipient + ", amount: " + self.str_amount
+ "\nfee: " + self.str_fee + ", desc: " + self.str_description)
def enter_order(self):
self.str_recipient = input("Pay to: ")
self.str_description = input("Description : ")
self.str_amount = input("Amount: ")
self.str_fee = input("Fee: ")
def send_order(self):
self.do_send()
def print_banner(self):
for i, x in enumerate(self.wallet.network.banner.split('\n')):
print(x)
def print_list(self, lst, firstline):
lst = list(lst)
self.maxpos = len(lst)
if not self.maxpos:
return
print(firstline)
for i in range(self.maxpos):
msg = lst[i] if i < len(lst) else ""
print(msg)
def main(self):
while self.done == 0:
self.main_command()
def do_send(self):
if not is_address(self.str_recipient):
print(_('Invalid MonetaryUnit address'))
return
try:
amount = int(Decimal(self.str_amount) * COIN)
except Exception:
print(_('Invalid Amount'))
return
try:
fee = int(Decimal(self.str_fee) * COIN)
except Exception:
print(_('Invalid Fee'))
return
if self.wallet.has_password():
password = self.password_dialog()
if not password:
return
else:
password = None
c = ""
while c != "y":
c = input("ok to send (y/n)?")
if c == "n":
return
try:
tx = self.wallet.mktx([TxOutput(TYPE_ADDRESS, self.str_recipient, amount)],
password, self.config, fee)
except Exception as e:
print(str(e))
return
if self.str_description:
self.wallet.labels[tx.txid()] = self.str_description
print(_("Please wait..."))
try:
self.network.run_from_another_thread(
self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
print(msg)
except BestEffortRequestFailed as e:
msg = repr(e)
print(msg)
else:
print(_('Payment sent.'))
# self.do_clear()
# self.update_contacts_tab()
def network_dialog(self):
print("use 'electrum-mue setconfig server/proxy' to change your network settings")
return True
def settings_dialog(self):
print("use 'electrum-mue setconfig' to change your settings")
return True
def password_dialog(self):
return getpass.getpass()
# XXX unused
def run_receive_tab(self, c):
# if c == 10:
# out = self.run_popup('Address', ["Edit label", "Freeze", "Prioritize"])
return
def run_contacts_tab(self, c):
pass
|
py | b40e21f394f4b34daf2d5543856ad12c0a26c351 | import asyncio
from samt import Bot, Answer, Context, Mode
marv = Bot()
@marv.default_answer
def default():
return 'unknown', Context.get('message').text
@marv.answer("/start")
async def start():
return Answer('greeting', Context.get('user'))
@marv.answer("Guten Tag")
def guten_tag():
a = Answer('greeting', Context.get('user'))
a.language_feature = False
return a
if __name__ == "__main__":
marv.listen()
|
py | b40e237629314fadc3ed07fc6d0674f3e270c824 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder import service
from cinder.tests.unit.api import fakes
from cinder.tests.unit import utils as tests_utils
from cinder.tests.unit import volume as base
CONF = cfg.CONF
class VolumeCleanupTestCase(base.BaseVolumeTestCase):
MOCK_WORKER = False
def setUp(self):
super(VolumeCleanupTestCase, self).setUp()
self.service_id = 1
self.mock_object(service.Service, 'service_id', self.service_id)
self.patch('cinder.volume.utils.clear_volume', autospec=True)
def _assert_workers_are_removed(self):
workers = db.worker_get_all(self.context, read_deleted='yes')
self.assertListEqual([], workers)
@mock.patch('os.listdir')
def test_init_host_clears_uploads_available_volume(self, mock_listdir):
"""init_host will clean an available volume stuck in uploading."""
mock_listdir.return_value = []
volume = tests_utils.create_volume(self.context, status='uploading',
size=0, host=CONF.host)
db.worker_create(self.context, resource_type='Volume',
resource_id=volume.id, status=volume.status,
service_id=self.service_id)
self.volume.init_host(service_id=service.Service.service_id)
volume.refresh()
self.assertEqual("available", volume.status)
self._assert_workers_are_removed()
@mock.patch('cinder.manager.CleanableManager.init_host')
def test_init_host_clears_uploads_in_use_volume(self, init_host_mock):
"""init_host will clean an in-use volume stuck in uploading."""
volume = tests_utils.create_volume(self.context, status='uploading',
size=0, host=CONF.host)
db.worker_create(self.context, resource_type='Volume',
resource_id=volume.id, status=volume.status,
service_id=self.service_id)
fake_uuid = fakes.get_fake_uuid()
tests_utils.attach_volume(self.context, volume.id, fake_uuid,
'fake_host', '/dev/vda')
self.volume.init_host(service_id=mock.sentinel.service_id)
init_host_mock.assert_called_once_with(
service_id=mock.sentinel.service_id, added_to_cluster=None)
volume.refresh()
self.assertEqual("in-use", volume.status)
self._assert_workers_are_removed()
@mock.patch('os.listdir')
@mock.patch('cinder.image.image_utils.cleanup_temporary_file')
def test_init_host_clears_downloads(self, mock_cleanup_tmp_file,
mock_listdir):
"""Test that init_host will unwedge a volume stuck in downloading."""
mock_listdir.return_value = []
volume = tests_utils.create_volume(self.context, status='downloading',
size=0, host=CONF.host)
db.worker_create(self.context, resource_type='Volume',
resource_id=volume.id, status=volume.status,
service_id=self.service_id)
mock_clear = self.mock_object(self.volume.driver, 'clear_download')
self.volume.init_host(service_id=service.Service.service_id)
self.assertEqual(1, mock_clear.call_count)
self.assertEqual(volume.id, mock_clear.call_args[0][1].id)
volume.refresh()
self.assertEqual("error", volume['status'])
mock_cleanup_tmp_file.assert_called_once_with(CONF.host)
self.volume.delete_volume(self.context, volume=volume)
self._assert_workers_are_removed()
@mock.patch('os.listdir')
@mock.patch('cinder.image.image_utils.cleanup_temporary_file')
def test_init_host_resumes_deletes(self, mock_cleanup_tmp_file,
mock_listdir):
"""init_host will resume deleting volume in deleting status."""
mock_listdir.return_value = []
volume = tests_utils.create_volume(self.context, status='deleting',
size=0, host=CONF.host)
db.worker_create(self.context, resource_type='Volume',
resource_id=volume.id, status=volume.status,
service_id=self.service_id)
self.volume.init_host(service_id=service.Service.service_id)
self.assertRaises(exception.VolumeNotFound, db.volume_get,
context.get_admin_context(), volume.id)
mock_cleanup_tmp_file.assert_called_once_with(CONF.host)
self._assert_workers_are_removed()
@mock.patch('os.listdir')
@mock.patch('cinder.image.image_utils.cleanup_temporary_file')
def test_create_volume_fails_with_creating_and_downloading_status(
self, mock_cleanup_tmp_file, mock_listdir):
"""Test init_host_with_service in case of volume.
While the status of volume is 'creating' or 'downloading',
volume process down.
After process restarting this 'creating' status is changed to 'error'.
"""
mock_listdir.return_value = []
for status in ('creating', 'downloading'):
volume = tests_utils.create_volume(self.context, status=status,
size=0, host=CONF.host)
db.worker_create(self.context, resource_type='Volume',
resource_id=volume.id, status=volume.status,
service_id=self.service_id)
self.volume.init_host(service_id=service.Service.service_id)
volume.refresh()
self.assertEqual('error', volume['status'])
self.volume.delete_volume(self.context, volume)
self.assertTrue(mock_cleanup_tmp_file.called)
self._assert_workers_are_removed()
@mock.patch('os.listdir')
def test_create_snapshot_fails_with_creating_status(self, mock_listdir):
"""Test init_host_with_service in case of snapshot.
While the status of snapshot is 'creating', volume process
down. After process restarting this 'creating' status is
changed to 'error'.
"""
volume = tests_utils.create_volume(self.context,
**self.volume_params)
snapshot = tests_utils.create_snapshot(
self.context,
volume.id,
status=fields.SnapshotStatus.CREATING)
db.worker_create(self.context, resource_type='Snapshot',
resource_id=snapshot.id, status=snapshot.status,
service_id=self.service_id)
self.volume.init_host(service_id=service.Service.service_id)
snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot.id)
self.assertEqual(fields.SnapshotStatus.ERROR, snapshot_obj.status)
self.assertEqual(service.Service.service_id,
self.volume.service_id)
self._assert_workers_are_removed()
self.volume.delete_snapshot(self.context, snapshot_obj)
self.volume.delete_volume(self.context, volume)
@mock.patch('os.listdir')
def test_init_host_clears_deleting_snapshots(self, mock_listdir):
"""Test that init_host will delete a snapshot stuck in deleting."""
mock_listdir.return_value = []
volume = tests_utils.create_volume(self.context, status='deleting',
size=1, host=CONF.host)
snapshot = tests_utils.create_snapshot(self.context,
volume.id, status='deleting')
db.worker_create(self.context, resource_type='Volume',
resource_id=volume.id, status=volume.status,
service_id=self.service_id)
self.volume.init_host(service_id=self.service_id)
self.assertRaises(exception.VolumeNotFound, volume.refresh)
self.assertRaises(exception.SnapshotNotFound, snapshot.refresh)
|
py | b40e2538e7eca239f3b41df3368718122f54c302 | # Copyright (c) Open-MMLab. All rights reserved.
import os
import json
import tempfile
import warnings
from typing import Optional
from argparse import Namespace
from addict import Dict
from ..utils import check_file
BASE_KEY = "_base_"
RESERVED_KEYS = ["filename", "text"]
class ConfigDict(Dict):
r"""ConfigDict based on Dict, which use to convert the config
file into config dict
"""
def __missing__(self, name):
raise KeyError(name)
def __getattr__(self, name):
try:
value = super(ConfigDict, self).__getattr__(name)
except KeyError:
ex = AttributeError(
f"`{self.__class__.__name__}` object has no attribute `{name}`"
)
except Exception as e:
ex = e
else:
return value
raise ex
class Config(object):
r"""A facility for config and config files.
It supports common file formats as configs: python/json/yaml. The interface
is the same as a dict object and also allows access config values as
attributes.
Example:
>>> cfg = Config(dict(a=1, b=dict(b1=[0, 1])))
>>> cfg.a
1
>>> cfg.b
{"b1": [0, 1]}
>>> cfg.b.b1
[0, 1]
>>> cfg = Config.fromfile("./configs/test.py")
>>> cfg.filename
"/home/gorilla_lab/code/gorilla/configs/test.py"
>>> cfg.item4
"test"
>>> cfg
"Config [path: /home/gorilla_lab/code/gorilla/configs/test.py]: "
"{"item1": [1, 2], "item2": {"a": 0}, "item3": True, "item4": "test"}"
"""
def __init__(self,
cfg_dict: Optional[Dict] = None,
cfg_text: Optional[str] = None,
filename: Optional[str] = None):
if cfg_dict is None:
cfg_dict = dict()
elif not isinstance(cfg_dict, dict):
raise TypeError(f"cfg_dict must be a dict, "
f"but got {type(cfg_dict)}")
for key in cfg_dict:
if key in RESERVED_KEYS:
raise KeyError(f"{key} is reserved for config file")
super(Config, self).__setattr__("_cfg_dict", ConfigDict(cfg_dict))
super(Config, self).__setattr__("_filename", filename)
if cfg_text:
text = cfg_text
elif filename:
with open(filename, "r") as f:
text = f.read()
else:
text = ""
super(Config, self).__setattr__("_text", text)
@staticmethod
def _file2dict(filename: str):
filename = os.path.abspath(os.path.expanduser(filename))
check_file(filename)
from gorilla.fileio import load
cfg_dict = ConfigDict(load(filename))
with open(filename, "r") as f:
cfg_text = f.read()
        # here cfg_dict is still the same as the content of the --config file,
        # and the code block below reads the base sub-config files and merges them into one.
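        # e.g. (illustrative, not from the original source): a child config containing
        #   _base_ = ["./base_dataset.py", "./base_model.py"]
        # gets both base dicts merged in before its own keys are applied on top.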
if BASE_KEY in cfg_dict:
cfg_dir = os.path.dirname(filename)
base_filename = cfg_dict.pop(BASE_KEY)
base_filename = base_filename if isinstance(
base_filename, list) else [base_filename]
cfg_dict_list = list()
cfg_text_list = list()
for f in base_filename:
_cfg_dict, _cfg_text = Config._file2dict(os.path.join(cfg_dir, f))
cfg_dict_list.append(_cfg_dict)
cfg_text_list.append(_cfg_text)
base_cfg_dict = dict()
for c in cfg_dict_list:
if len(base_cfg_dict.keys() & c.keys()) > 0:
# e.g. sub-config file about dataset should not overlap with
# the one about model
raise KeyError("Duplicate key is not allowed among bases")
base_cfg_dict.update(c)
cfg_dict = Config._merge_a_into_b(cfg_dict, base_cfg_dict)
# merge cfg_text
cfg_text_list.append(cfg_text)
cfg_text = "\n".join(cfg_text_list)
return cfg_dict, cfg_text
@staticmethod
def _merge_a_into_b(a, b):
r"""merge dict ``a`` into dict ``b`` (non-inplace).
Values in ``a`` will overwrite ``b``. ``b`` is copied first to avoid
in-place modifications.
Args:
a (dict): The source dict to be merged into ``b``.
b (dict): The origin dict to be fetch keys from ``a``.
Returns:
dict: The modified dict of ``b`` using ``a``.
Examples:
# Normally merge a into b.
>>> Config._merge_a_into_b(
... dict(obj=dict(a=2)), dict(obj=dict(a=1)))
{"obj": {"a": 2}}
"""
b = b.copy()
for k, v in a.items():
if isinstance(v, dict) and k in b:
allowed_types = dict
if not isinstance(b[k], allowed_types):
raise TypeError(
f"{k}={v} in child config cannot inherit from base "
f"because {k} is a dict in the child config but is of "
f"type {type(b[k])} in base config.")
b[k] = Config._merge_a_into_b(v, b[k])
else:
b[k] = v
return b
@staticmethod
def fromfile(filename: str):
r"""cfg_text is the text content read from 5 files, and cfg_dict is
a dict resolved by the text content.
"""
cfg_dict, cfg_text = Config._file2dict(filename)
return Config(cfg_dict, cfg_text=cfg_text, filename=filename)
@staticmethod
def fromstring(cfg_str, file_format):
"""Generate config from config str.
Args:
cfg_str (str): Config str.
file_format (str): Config file format corresponding to the
config str. Only py/yml/yaml/json type are supported now!
Returns:
obj:`Config`: Config obj.
"""
if file_format not in [".py", ".json", ".yaml", ".yml"]:
raise IOError("Only py/yml/yaml/json type are supported now!")
if file_format != ".py" and "dict(" in cfg_str:
# check if users specify a wrong suffix for python
warnings.warn(
"Please check 'file_format', the file format may be .py")
with tempfile.NamedTemporaryFile("w", suffix=file_format) as temp_file:
temp_file.write(cfg_str)
temp_file.flush()
cfg = Config.fromfile(temp_file.name)
return cfg
@property
def filename(self) -> str:
return self._filename
@property
def text(self) -> str:
return self._text
def __repr__(self) -> str:
content = f"Config (path: {self.filename})\n"
content += json.dumps(self._cfg_dict, indent=4, ensure_ascii=False)
return content
def __len__(self) -> int:
return len(self._cfg_dict)
def __getattr__(self, name: str):
return getattr(self._cfg_dict, name)
def __getitem__(self, name: str):
return self._cfg_dict.__getitem__(name)
def __setattr__(self, name: str, value: Dict):
if isinstance(value, dict):
value = ConfigDict(value)
self._cfg_dict.__setattr__(name, value)
def __setitem__(self, name: str, value: Dict):
if isinstance(value, dict):
value = ConfigDict(value)
self._cfg_dict.__setitem__(name, value)
def __iter__(self):
return iter(self._cfg_dict)
def dump(self, file: Optional[str] = None, **kwargs):
cfg_dict = self._cfg_dict.to_dict()
from gorilla.fileio import dump
if file is None:
# output the content
file_format = self.filename.split(".")[-1]
if file_format == "py":
return self.text
else:
return dump(cfg_dict, file_format=file_format, **kwargs)
else:
if file.endswith("py"):
with open(file, "w") as f:
f.write(self.text)
else:
dump(cfg_dict, file, **kwargs)
def merge_from_dict(self, options: Dict):
r"""Merge list into cfg_dict.
Merge the dict parsed by MultipleKVAction into this cfg.
Examples:
>>> options = {"model.backbone.depth": 50,
... "model.backbone.with_cp":True}
>>> cfg = Config(dict(model=dict(backbone=dict(type="ResNet"))))
>>> cfg.merge_from_dict(options)
>>> cfg_dict = super(Config, self).__getattribute__("_cfg_dict")
>>> assert cfg_dict == dict(
... model=dict(backbone=dict(depth=50, with_cp=True)))
# Merge list element
>>> cfg = Config(dict(pipeline=[
... dict(type="LoadImage"), dict(type="LoadAnnotations")]))
>>> options = dict(pipeline={"0": dict(type="SelfLoadImage")})
Args:
options (dict): dict of configs to merge from.
"""
option_cfg_dict = {}
for full_key, v in options.items():
if v is None: # handle the case when a parameter simultaneously appears in argparse and config file
continue
d = option_cfg_dict
key_list = full_key.split(".")
for subkey in key_list[:-1]:
d.setdefault(subkey, ConfigDict())
d = d[subkey]
subkey = key_list[-1]
d[subkey] = v
cfg_dict = self._cfg_dict
cfg_dict = Config._merge_a_into_b(option_cfg_dict, cfg_dict)
# NOTE: strange phenomenon
# self._cfg_dict = cfg_dict
super(Config, self).__setattr__("_cfg_dict", cfg_dict)
def merge_cfg_and_args(cfg: Optional[Config] = None,
args: Optional[Namespace] = None) -> Config:
r"""merge args and cfg into a Config by calling 'merge_from_dict' func
Args:
cfg (Config, optional): Config from cfg file.
args (Namespace, optional): Argument parameters input.
Returns:
Config: Merged Config
"""
assert cfg is not None or args is not None, "'cfg' or 'args' can not be None simultaneously"
if cfg is None:
cfg = Config()
else:
assert isinstance(
cfg, Config
), f"'cfg' must be None or gorilla.Config, but got {type(cfg)}"
if args is None:
args = Namespace()
else:
assert isinstance(
args, Namespace
), f"'args' must be None or argsparse.Namespace, but got {type(args)}"
# convert namespace into dict
args_dict = vars(args)
cfg.merge_from_dict(args_dict)
return cfg
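# Minimal usage sketch (the file path and keys below are illustrative assumptions):
#
#   import argparse
#   cfg = Config.fromfile("configs/example.yaml")
#   cfg.merge_from_dict({"model.backbone.depth": 101})
#   parser = argparse.ArgumentParser()
#   parser.add_argument("--batch_size", type=int, default=None)
#   cfg = merge_cfg_and_args(cfg, parser.parse_args())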
|
py | b40e25558e7efd88b2700c4ecec81479f2821702 | # pylint: disable=no-self-use
import pytest
import requests
from uservice.views.listjobs import (
validate_json_job, validate_json_job_list, ValidationError)
from test.testbase import ADMINUSER, ADMINPW
class TestJsonJobValidation:
@pytest.fixture
def job(self):
return {
'id': 'abcd',
'source_url': 'http://example.com/foobar',
'view_result_url': 'http://example.com/foobaz',
'target_url': 'http://example.com/foobiz',
}
def test_valid_job(self, job):
validate_json_job(job)
def test_id_is_not_string(self, job):
job['id'] = 42
with pytest.raises(ValidationError) as excinfo:
validate_json_job(job)
expected_error = "Expected string in field 'id'"
assert str(excinfo.value) == expected_error
def test_missing_field_source_url(self, job):
del job['source_url']
with pytest.raises(ValidationError) as excinfo:
validate_json_job(job)
expected_error = "Missing required fields: source_url"
assert str(excinfo.value) == expected_error
def test_missing_field_id(self, job):
del job['id']
with pytest.raises(ValidationError) as excinfo:
validate_json_job(job)
expected_error = "Missing required fields: id"
assert str(excinfo.value) == expected_error
def test_extra_field(self, job):
job['some_extra_field'] = 'blabliblu'
with pytest.raises(ValidationError) as excinfo:
validate_json_job(job)
expected_error = (
"These fields do not exist or are for internal use: "
"some_extra_field"
)
assert str(excinfo.value) == expected_error
@pytest.fixture
def job_list(self):
return [
{
'id': 'abc',
'source_url': 'http://example.com/abc',
},
{
'id': 'xyz',
'source_url': 'http://example.com/xyz',
},
]
def test_validate_valid_json_job_list(self, job_list):
validate_json_job_list(job_list)
def test_validate_invalid_json_job_list(self, job_list):
del job_list[0]['id']
job_list[1]['foo'] = 'bar'
with pytest.raises(ValidationError) as excinfo:
validate_json_job_list(job_list)
expected_error = (
"Job#0: Missing required fields: id\n"
"Job#1: These fields do not exist or are for internal use: foo"
)
assert str(excinfo.value) == expected_error
@pytest.mark.system
class TestAddJobs:
@pytest.fixture
def session(self):
requests_session = requests.Session()
requests_session.auth = (ADMINUSER, ADMINPW)
return requests_session
@pytest.fixture
def project(self, session, microq_service):
name = 'testproject'
url = "{}/rest_api/v4/{}".format(microq_service, name)
session.put(url).raise_for_status()
yield url
if session.head(url).status_code == 200:
session.delete(url).raise_for_status()
def test_add_a_single_valid_job(self, session, project):
job = {'id': '42', 'source_url': 'http://example.com/job'}
session.post(project + '/jobs', json=job).raise_for_status()
assert len(session.get(project + '/jobs').json()['Jobs']) == 1
assert session.get(project).json()['NrJobsAdded'] == 1
def test_add_a_single_valid_job_with_now(self, session, project):
job = {'id': '42', 'source_url': 'http://example.com/job'}
params = {'now': '2000-01-01 10:00'}
response = session.post(project + '/jobs', params=params, json=job)
response.raise_for_status()
assert len(session.get(project + '/jobs').json()['Jobs']) == 1
def test_add_a_single_invalid_job(self, session, project):
job = {}
response = session.post(project + '/jobs', json=job)
assert not response.ok
responsejson = response.json()
assert 'error' in responsejson
err = responsejson['error']
assert err.startswith("Missing required fields:")
_, missing = err.split(':', 1)
assert {v.strip() for v in missing.split(',')} == {'id', 'source_url'}
assert not session.get(project + '/jobs').json()['Jobs']
def test_add_the_same_job_twice(self, session, project):
job = {'id': '42', 'source_url': 'http://example.com/job'}
session.post(project + '/jobs', json=job).raise_for_status()
response = session.post(project + '/jobs', json=job)
assert response.status_code == 201
assert len(session.get(project + '/jobs').json()['Jobs']) == 1
assert session.get(project).json()['NrJobsAdded'] == 1
def test_add_a_list_of_jobs(self, session, project):
jobs = [
{'id': '41', 'source_url': 'http://example.com/job'},
{'id': '42', 'source_url': 'http://example.com/job'},
]
session.post(project + '/jobs', json=jobs).raise_for_status()
assert len(session.get(project + '/jobs').json()['Jobs']) == 2
assert session.get(project).json()['NrJobsAdded'] == 2
def test_add_a_list_of_jobs_with_invalid_ones(self, session, project):
jobs = [
{'id': '41', 'source_url': 'http://example.com/job'},
{'id': '42'},
{'source_url': 'http://example.com/job3'},
]
response = session.post(project + '/jobs', json=jobs)
assert not response.ok
expected_error = (
'Job#1: Missing required fields: source_url\n'
'Job#2: Missing required fields: id'
)
assert response.json() == {'error': expected_error}
assert not session.get(project + '/jobs').json()['Jobs']
def test_add_a_list_of_jobs_with_duplicated_id(self, session, project):
jobs = [
{'id': '41', 'source_url': 'http://example.com/job1'},
{'id': '41', 'source_url': 'http://example.com/job2'},
{'id': '42', 'source_url': 'http://example.com/job3'},
]
response = session.post(project + '/jobs', json=jobs)
assert not response.ok
expected_error = 'Job#1: A job with id 41 already exists.'
assert response.json() == {'error': expected_error}
assert not session.get(project + '/jobs').json()['Jobs']
def test_add_a_list_of_jobs_with_duplicates(self, session, project):
jobs = [
{'id': '41', 'source_url': 'http://example.com/job1'},
{'id': '41', 'source_url': 'http://example.com/job1'},
{'id': '42', 'source_url': 'http://example.com/job3'},
]
response = session.post(project + '/jobs', json=jobs)
assert response.status_code == 201
assert len(session.get(project + '/jobs').json()['Jobs']) == 2
def test_add_a_list_of_jobs_with_one_existing_id(self, session, project):
job = {'id': '42', 'source_url': 'http://example.com/job'}
session.post(project + '/jobs', json=job).raise_for_status()
jobs = [
{'id': '41', 'source_url': 'http://example.com/job1'},
{'id': '42', 'source_url': 'http://example.com/job2'},
]
response = session.post(project + '/jobs', json=jobs)
assert not response.ok
expected_error = 'Job#1: A job with id 42 already exists.'
assert response.json() == {'error': expected_error}
assert len(session.get(project + '/jobs').json()['Jobs']) == 1
def test_add_a_list_of_jobs_with_one_existing_job(self, session, project):
job = {'id': '42', 'source_url': 'http://example.com/job'}
session.post(project + '/jobs', json=job).raise_for_status()
jobs = [
{'id': '41', 'source_url': 'http://example.com/job1'},
{'id': '42', 'source_url': 'http://example.com/job'},
]
response = session.post(project + '/jobs', json=jobs)
assert response.status_code == 201
assert len(session.get(project + '/jobs').json()['Jobs']) == 2
|
py | b40e2588fb3961680fc533af1c917aa35f1ccf99 | #!/usr/bin/python
# Converts .json files from Translatewiki into .js files.
#
# Copyright 2013 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import codecs # for codecs.open(..., 'utf-8')
import glob
import json # for json.load()
import os # for os.path()
import subprocess # for subprocess.check_call()
from common import InputError
# Store parsed command-line arguments in global variable.
args = None
def _insert_breaks(s, min_length, max_length):
"""Inserts line breaks to try to get line lengths within the given range.
This tries to minimize raggedness and to break lines at punctuation
(periods and commas). It never splits words or numbers. Multiple spaces
may be converted into single spaces.
Args:
s: The string to split.
min_length: The requested minimum number of characters per line.
max_length: The requested maximum number of characters per line.
Returns:
A copy of the original string with zero or more line breaks inserted.
"""
newline = '\\n'
if len(s) < min_length:
return s
# Try splitting by sentences. This assumes sentences end with periods.
sentences = s.split('.')
# Remove empty sentences.
sentences = [sen for sen in sentences if sen]
# If all sentences are at least min_length and at most max_length,
# then return one per line.
if not [sen for sen in sentences if
len(sen) > max_length or len(sen) < min_length]:
return newline.join([sen.strip() + '.' for sen in sentences])
# Otherwise, divide into words, and use a greedy algorithm for the first
# line, and try to get later lines as close as possible in length.
words = [word for word in s.split(' ') if word]
line1 = ''
while (words and len(line1) + 1 + len(words[0]) < max_length and
# Preferentially split on periods and commas.
(not ((line1.endswith('. ') or line1.endswith(', ')) and
len(line1) > min_length))):
line1 += words.pop(0) + ' '
# If it all fits on one line, return that line.
if not words:
return line1
ideal_length = len(line1)
output = line1
line = ''
while words:
line += words.pop(0) + ' '
if words:
potential_len = len(line) + len(words[0])
if (potential_len > max_length or
potential_len - ideal_length > ideal_length - len(line) or
(line.endswith('. ') and len(line) > min_length)):
output += newline + line
line = ''
output += newline + line
return output
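# Illustrative example (not part of the original script, values chosen for
# demonstration): a string made of two short sentences that each fit the
# requested window comes back one sentence per line, e.g.
#   _insert_breaks('First sentence. Second sentence.', 10, 40)
# gives 'First sentence.\nSecond sentence.', where '\n' is the literal
# backslash-n sequence expected by the generated JS output.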
def _create_xlf(target_lang):
"""Creates a <target_lang>.xlf file for Soy.
Args:
target_lang: The ISO 639 language code for the target language.
This is used in the name of the file and in the metadata.
Returns:
A pointer to a file to which the metadata has been written.
Raises:
IOError: An error occurred while opening or writing the file.
"""
filename = os.path.join(os.curdir, args.output_dir, target_lang + '.xlf')
out_file = codecs.open(filename, 'w', 'utf-8')
out_file.write("""<?xml version="1.0" encoding="UTF-8"?>
<xliff version="1.2" xmlns="urn:oasis:names:tc:xliff:document:1.2">
<file original="SoyMsgBundle"
datatype="x-soy-msg-bundle"
xml:space="preserve"
source-language="{0}"
target-language="{1}">
<body>""".format(args.source_lang, target_lang))
return out_file
def _close_xlf(xlf_file):
"""Closes a <target_lang>.xlf file created with create_xlf().
This includes writing the terminating XML.
Args:
xlf_file: A pointer to a file created by _create_xlf().
Raises:
IOError: An error occurred while writing to or closing the file.
"""
xlf_file.write("""
</body>
</file>
</xliff>
""")
xlf_file.close()
def _process_file(path_to_json, target_lang, key_dict):
"""Creates an .xlf file corresponding to the specified .json input file.
The name of the input file must be target_lang followed by '.json'.
The name of the output file will be target_lang followed by '.xlf'.
Args:
path_to_json: Path to the directory of xx.json files.
target_lang: An IETF language code (RFC 4646), such as 'es' or 'pt-br'.
key_dict: Dictionary mapping Blockly keys (e.g., Maze.turnLeft) to
Closure keys (hash numbers).
Raises:
IOError: An I/O error occurred with an input or output file.
InputError: Input JSON could not be parsed.
KeyError: Key found in input file but not in key file.
"""
filename = os.path.join(path_to_json, target_lang + '.json')
in_file = open(filename)
try:
j = json.load(in_file)
in_file.close()
except ValueError as e:
print('Error reading ' + filename)
raise InputError(filename, str(e))
out_file = _create_xlf(target_lang)
for key in j:
if key != '@metadata':
try:
identifier = key_dict[key]
except KeyError as e:
print('Key "%s" is in %s but not in %s' %
(key, filename, args.key_file))
raise e
target = j.get(key)
# Only insert line breaks for tooltips.
if key.lower().find('tooltip') != -1:
target = _insert_breaks(
j.get(key), args.min_length, args.max_length)
out_file.write(u"""
<trans-unit id="{0}" datatype="html">
<target>{1}</target>
</trans-unit>""".format(identifier, target))
_close_xlf(out_file)
def main():
"""Parses arguments and iterates over files."""
# Set up argument parser.
parser = argparse.ArgumentParser(description='Convert JSON files to JS.')
parser.add_argument('--source_lang', default='en',
help='ISO 639-1 source language code')
parser.add_argument('--output_dir', default='generated/',
help='relative directory for output files')
parser.add_argument('--key_file', default='json/keys.json',
help='relative path to input keys file')
parser.add_argument('--template', default='template.soy')
parser.add_argument('--min_length', default=30,
help='minimum line length (not counting last line)')
parser.add_argument('--max_length', default=50,
help='maximum line length (not guaranteed)')
parser.add_argument('--path_to_jar', default='third-party-downloads',
help='relative path from working directory to '
'SoyToJsSrcCompiler.jar')
parser.add_argument('files', nargs='+', help='input files')
# Initialize global variables.
global args
args = parser.parse_args()
# Make sure output_dir ends with slash.
if not args.output_dir.endswith(os.path.sep):
args.output_dir += os.path.sep
# Read in keys.json, mapping descriptions (e.g., Maze.turnLeft) to
# Closure keys (long hash numbers).
key_file = open(args.key_file)
key_dict = json.load(key_file)
key_file.close()
# Process each input file.
print('Creating .xlf files...')
processed_langs = []
if len(args.files) == 1:
# Windows does not expand globs automatically.
args.files = glob.glob(args.files[0])
for arg_file in args.files:
(path_to_json, filename) = os.path.split(arg_file)
if not filename.endswith('.json'):
raise InputError(filename, 'filenames must end with ".json"')
target_lang = filename[:filename.index('.')]
if target_lang not in ('qqq', 'keys'):
processed_langs.append(target_lang)
_process_file(path_to_json, target_lang, key_dict)
# Output command line for Closure compiler.
if processed_langs:
print('Creating .js files...')
processed_lang_list = ','.join(processed_langs)
subprocess.check_call([
'java',
'-jar', os.path.join(args.path_to_jar, 'SoyToJsSrcCompiler.jar'),
'--locales', processed_lang_list,
'--shouldProvideRequireSoyNamespaces',
'--isUsingIjData',
'--messageFilePathFormat', args.output_dir + '{LOCALE}.xlf',
'--outputPathFormat', os.path.join(args.output_dir, '{LOCALE}', 'soy.js'),
'--srcs', args.template])
if len(processed_langs) == 1:
print('Created ' + processed_lang_list + '/soy.js in ' + args.output_dir)
else:
print('Created {' + processed_lang_list + '}/soy.js in ' + args.output_dir)
for lang in processed_langs:
os.remove(args.output_dir + lang + '.xlf')
print('Removed .xlf files.')
if __name__ == '__main__':
main()
|
py | b40e258b06336692f7e1e70948adb57cdee6b4ac | # Set up imports
import pathlib
import csv
# Set up file paths
pybank_csv = pathlib.Path("budget_data.csv")
# Create lists
months = []
profits = []
monthly_change = []
pybank_dictionary = {}
# Open the CSV file
with open(pybank_csv, "r") as csvfile:
csvreader = csv.reader(csvfile, delimiter=",")
csv_header = next(csvreader)
# Loop through the csv file
for rows in csvreader:
# Append all months and profits into separate lists
months.append(rows[0])
profits.append(int(rows[1]))
# Loop through profits list to calculate average of the changes
for x in range(len(profits)-1):
monthly_change.append(profits[x+1]-profits[x])
# Create a list starting from Feb 2010
months_plus_one=months[1:]
# Calculate average monthly change
average_change = round(sum(monthly_change)/len(monthly_change),2)
# Zip months list and monthly_change list into dictionary
monthly_change_dictionary = dict(zip(months_plus_one,monthly_change))
# Calculate greatest increase in profits
greatest_increase = max(monthly_change)
greatest_increase_month = max(monthly_change_dictionary,key=monthly_change_dictionary.get)
# Calculate greatest decrease in losses
greatest_decrease = min(monthly_change)
greatest_decrease_month = min(monthly_change_dictionary,key=monthly_change_dictionary.get)
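# Illustrative sketch with made-up numbers (not taken from budget_data.csv):
# if profits were [100, 150, 120] for months ['Jan-2010', 'Feb-2010', 'Mar-2010'],
# then monthly_change would be [50, -30], monthly_change_dictionary would be
# {'Feb-2010': 50, 'Mar-2010': -30}, and the max/min lookups above would pick
# 'Feb-2010' and 'Mar-2010' respectively.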
# Print out financial statement
print("Financial Analysis")
print("-----------------------------")
print(f"Total months: {str(len(months))}")
print(f"Total: ${sum(profits)}")
print(f"Average Change: {average_change}")
print(f"Greatest Increase in Profits: {greatest_increase_month} (${greatest_increase})")
print(f"Greatest Decrease in Profits: {greatest_decrease_month} (${greatest_decrease})")
# Output files to CSV
output_path = pathlib.Path("output_pybank.csv")
with open(output_path, "w", newline="") as csvfile:  # newline="" avoids blank rows on Windows
csvwriter = csv.writer(csvfile, delimiter=",")
csvwriter.writerow(["Financial Analysis"])
csvwriter.writerow(["Total months: ",str(len(months))])
csvwriter.writerow(["Total: ",sum(profits)])
csvwriter.writerow(["Average Change: ",average_change])
csvwriter.writerow([f"Greatest Increase in Profits: ",f"{greatest_increase_month} ({greatest_increase})"])
csvwriter.writerow([f"Greatest Decrease in Profits: ",f"{greatest_decrease_month} ({greatest_decrease})"])
|
py | b40e26e00a2af45e5825bb0aeb83bda1e041cbe5 | # Generated by Django 2.0.4 on 2018-04-04 23:37
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('assignments', '0006_auto_20180404_2334'),
]
operations = [
migrations.AlterField(
model_name='assignment',
name='date_created',
field=models.DateTimeField(default=django.utils.timezone.now, editable=False, verbose_name='Date Created'),
),
]
|
py | b40e275c06cd7b1857a78ba32c58217ec8a3c399 | #!/usr/bin/env python3
# coding: utf-8
from __future__ import unicode_literals
import base64
import binascii
import calendar
import codecs
import collections
import contextlib
import ctypes
import datetime
import email.utils
import email.header
import errno
import functools
import gzip
import hashlib
import hmac
import importlib.util
import io
import itertools
import json
import locale
import math
import operator
import os
import platform
import random
import re
import socket
import ssl
import subprocess
import sys
import tempfile
import time
import traceback
import xml.etree.ElementTree
import zlib
from .compat import (
compat_HTMLParseError,
compat_HTMLParser,
compat_HTTPError,
compat_basestring,
compat_chr,
compat_cookiejar,
compat_ctypes_WINFUNCTYPE,
compat_etree_fromstring,
compat_expanduser,
compat_html_entities,
compat_html_entities_html5,
compat_http_client,
compat_integer_types,
compat_numeric_types,
compat_kwargs,
compat_os_name,
compat_parse_qs,
compat_shlex_quote,
compat_str,
compat_struct_pack,
compat_struct_unpack,
compat_urllib_error,
compat_urllib_parse,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urllib_parse_urlunparse,
compat_urllib_parse_quote,
compat_urllib_parse_quote_plus,
compat_urllib_parse_unquote_plus,
compat_urllib_request,
compat_urlparse,
compat_xpath,
)
from .socks import (
ProxyType,
sockssocket,
)
def register_socks_protocols():
# "Register" SOCKS protocols
# In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
# URLs with protocols not in urlparse.uses_netloc are not handled correctly
for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
if scheme not in compat_urlparse.uses_netloc:
compat_urlparse.uses_netloc.append(scheme)
# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))
def random_user_agent():
_USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
_CHROME_VERSIONS = (
'74.0.3729.129',
'76.0.3780.3',
'76.0.3780.2',
'74.0.3729.128',
'76.0.3780.1',
'76.0.3780.0',
'75.0.3770.15',
'74.0.3729.127',
'74.0.3729.126',
'76.0.3779.1',
'76.0.3779.0',
'75.0.3770.14',
'74.0.3729.125',
'76.0.3778.1',
'76.0.3778.0',
'75.0.3770.13',
'74.0.3729.124',
'74.0.3729.123',
'73.0.3683.121',
'76.0.3777.1',
'76.0.3777.0',
'75.0.3770.12',
'74.0.3729.122',
'76.0.3776.4',
'75.0.3770.11',
'74.0.3729.121',
'76.0.3776.3',
'76.0.3776.2',
'73.0.3683.120',
'74.0.3729.120',
'74.0.3729.119',
'74.0.3729.118',
'76.0.3776.1',
'76.0.3776.0',
'76.0.3775.5',
'75.0.3770.10',
'74.0.3729.117',
'76.0.3775.4',
'76.0.3775.3',
'74.0.3729.116',
'75.0.3770.9',
'76.0.3775.2',
'76.0.3775.1',
'76.0.3775.0',
'75.0.3770.8',
'74.0.3729.115',
'74.0.3729.114',
'76.0.3774.1',
'76.0.3774.0',
'75.0.3770.7',
'74.0.3729.113',
'74.0.3729.112',
'74.0.3729.111',
'76.0.3773.1',
'76.0.3773.0',
'75.0.3770.6',
'74.0.3729.110',
'74.0.3729.109',
'76.0.3772.1',
'76.0.3772.0',
'75.0.3770.5',
'74.0.3729.108',
'74.0.3729.107',
'76.0.3771.1',
'76.0.3771.0',
'75.0.3770.4',
'74.0.3729.106',
'74.0.3729.105',
'75.0.3770.3',
'74.0.3729.104',
'74.0.3729.103',
'74.0.3729.102',
'75.0.3770.2',
'74.0.3729.101',
'75.0.3770.1',
'75.0.3770.0',
'74.0.3729.100',
'75.0.3769.5',
'75.0.3769.4',
'74.0.3729.99',
'75.0.3769.3',
'75.0.3769.2',
'75.0.3768.6',
'74.0.3729.98',
'75.0.3769.1',
'75.0.3769.0',
'74.0.3729.97',
'73.0.3683.119',
'73.0.3683.118',
'74.0.3729.96',
'75.0.3768.5',
'75.0.3768.4',
'75.0.3768.3',
'75.0.3768.2',
'74.0.3729.95',
'74.0.3729.94',
'75.0.3768.1',
'75.0.3768.0',
'74.0.3729.93',
'74.0.3729.92',
'73.0.3683.117',
'74.0.3729.91',
'75.0.3766.3',
'74.0.3729.90',
'75.0.3767.2',
'75.0.3767.1',
'75.0.3767.0',
'74.0.3729.89',
'73.0.3683.116',
'75.0.3766.2',
'74.0.3729.88',
'75.0.3766.1',
'75.0.3766.0',
'74.0.3729.87',
'73.0.3683.115',
'74.0.3729.86',
'75.0.3765.1',
'75.0.3765.0',
'74.0.3729.85',
'73.0.3683.114',
'74.0.3729.84',
'75.0.3764.1',
'75.0.3764.0',
'74.0.3729.83',
'73.0.3683.113',
'75.0.3763.2',
'75.0.3761.4',
'74.0.3729.82',
'75.0.3763.1',
'75.0.3763.0',
'74.0.3729.81',
'73.0.3683.112',
'75.0.3762.1',
'75.0.3762.0',
'74.0.3729.80',
'75.0.3761.3',
'74.0.3729.79',
'73.0.3683.111',
'75.0.3761.2',
'74.0.3729.78',
'74.0.3729.77',
'75.0.3761.1',
'75.0.3761.0',
'73.0.3683.110',
'74.0.3729.76',
'74.0.3729.75',
'75.0.3760.0',
'74.0.3729.74',
'75.0.3759.8',
'75.0.3759.7',
'75.0.3759.6',
'74.0.3729.73',
'75.0.3759.5',
'74.0.3729.72',
'73.0.3683.109',
'75.0.3759.4',
'75.0.3759.3',
'74.0.3729.71',
'75.0.3759.2',
'74.0.3729.70',
'73.0.3683.108',
'74.0.3729.69',
'75.0.3759.1',
'75.0.3759.0',
'74.0.3729.68',
'73.0.3683.107',
'74.0.3729.67',
'75.0.3758.1',
'75.0.3758.0',
'74.0.3729.66',
'73.0.3683.106',
'74.0.3729.65',
'75.0.3757.1',
'75.0.3757.0',
'74.0.3729.64',
'73.0.3683.105',
'74.0.3729.63',
'75.0.3756.1',
'75.0.3756.0',
'74.0.3729.62',
'73.0.3683.104',
'75.0.3755.3',
'75.0.3755.2',
'73.0.3683.103',
'75.0.3755.1',
'75.0.3755.0',
'74.0.3729.61',
'73.0.3683.102',
'74.0.3729.60',
'75.0.3754.2',
'74.0.3729.59',
'75.0.3753.4',
'74.0.3729.58',
'75.0.3754.1',
'75.0.3754.0',
'74.0.3729.57',
'73.0.3683.101',
'75.0.3753.3',
'75.0.3752.2',
'75.0.3753.2',
'74.0.3729.56',
'75.0.3753.1',
'75.0.3753.0',
'74.0.3729.55',
'73.0.3683.100',
'74.0.3729.54',
'75.0.3752.1',
'75.0.3752.0',
'74.0.3729.53',
'73.0.3683.99',
'74.0.3729.52',
'75.0.3751.1',
'75.0.3751.0',
'74.0.3729.51',
'73.0.3683.98',
'74.0.3729.50',
'75.0.3750.0',
'74.0.3729.49',
'74.0.3729.48',
'74.0.3729.47',
'75.0.3749.3',
'74.0.3729.46',
'73.0.3683.97',
'75.0.3749.2',
'74.0.3729.45',
'75.0.3749.1',
'75.0.3749.0',
'74.0.3729.44',
'73.0.3683.96',
'74.0.3729.43',
'74.0.3729.42',
'75.0.3748.1',
'75.0.3748.0',
'74.0.3729.41',
'75.0.3747.1',
'73.0.3683.95',
'75.0.3746.4',
'74.0.3729.40',
'74.0.3729.39',
'75.0.3747.0',
'75.0.3746.3',
'75.0.3746.2',
'74.0.3729.38',
'75.0.3746.1',
'75.0.3746.0',
'74.0.3729.37',
'73.0.3683.94',
'75.0.3745.5',
'75.0.3745.4',
'75.0.3745.3',
'75.0.3745.2',
'74.0.3729.36',
'75.0.3745.1',
'75.0.3745.0',
'75.0.3744.2',
'74.0.3729.35',
'73.0.3683.93',
'74.0.3729.34',
'75.0.3744.1',
'75.0.3744.0',
'74.0.3729.33',
'73.0.3683.92',
'74.0.3729.32',
'74.0.3729.31',
'73.0.3683.91',
'75.0.3741.2',
'75.0.3740.5',
'74.0.3729.30',
'75.0.3741.1',
'75.0.3741.0',
'74.0.3729.29',
'75.0.3740.4',
'73.0.3683.90',
'74.0.3729.28',
'75.0.3740.3',
'73.0.3683.89',
'75.0.3740.2',
'74.0.3729.27',
'75.0.3740.1',
'75.0.3740.0',
'74.0.3729.26',
'73.0.3683.88',
'73.0.3683.87',
'74.0.3729.25',
'75.0.3739.1',
'75.0.3739.0',
'73.0.3683.86',
'74.0.3729.24',
'73.0.3683.85',
'75.0.3738.4',
'75.0.3738.3',
'75.0.3738.2',
'75.0.3738.1',
'75.0.3738.0',
'74.0.3729.23',
'73.0.3683.84',
'74.0.3729.22',
'74.0.3729.21',
'75.0.3737.1',
'75.0.3737.0',
'74.0.3729.20',
'73.0.3683.83',
'74.0.3729.19',
'75.0.3736.1',
'75.0.3736.0',
'74.0.3729.18',
'73.0.3683.82',
'74.0.3729.17',
'75.0.3735.1',
'75.0.3735.0',
'74.0.3729.16',
'73.0.3683.81',
'75.0.3734.1',
'75.0.3734.0',
'74.0.3729.15',
'73.0.3683.80',
'74.0.3729.14',
'75.0.3733.1',
'75.0.3733.0',
'75.0.3732.1',
'74.0.3729.13',
'74.0.3729.12',
'73.0.3683.79',
'74.0.3729.11',
'75.0.3732.0',
'74.0.3729.10',
'73.0.3683.78',
'74.0.3729.9',
'74.0.3729.8',
'74.0.3729.7',
'75.0.3731.3',
'75.0.3731.2',
'75.0.3731.0',
'74.0.3729.6',
'73.0.3683.77',
'73.0.3683.76',
'75.0.3730.5',
'75.0.3730.4',
'73.0.3683.75',
'74.0.3729.5',
'73.0.3683.74',
'75.0.3730.3',
'75.0.3730.2',
'74.0.3729.4',
'73.0.3683.73',
'73.0.3683.72',
'75.0.3730.1',
'75.0.3730.0',
'74.0.3729.3',
'73.0.3683.71',
'74.0.3729.2',
'73.0.3683.70',
'74.0.3729.1',
'74.0.3729.0',
'74.0.3726.4',
'73.0.3683.69',
'74.0.3726.3',
'74.0.3728.0',
'74.0.3726.2',
'73.0.3683.68',
'74.0.3726.1',
'74.0.3726.0',
'74.0.3725.4',
'73.0.3683.67',
'73.0.3683.66',
'74.0.3725.3',
'74.0.3725.2',
'74.0.3725.1',
'74.0.3724.8',
'74.0.3725.0',
'73.0.3683.65',
'74.0.3724.7',
'74.0.3724.6',
'74.0.3724.5',
'74.0.3724.4',
'74.0.3724.3',
'74.0.3724.2',
'74.0.3724.1',
'74.0.3724.0',
'73.0.3683.64',
'74.0.3723.1',
'74.0.3723.0',
'73.0.3683.63',
'74.0.3722.1',
'74.0.3722.0',
'73.0.3683.62',
'74.0.3718.9',
'74.0.3702.3',
'74.0.3721.3',
'74.0.3721.2',
'74.0.3721.1',
'74.0.3721.0',
'74.0.3720.6',
'73.0.3683.61',
'72.0.3626.122',
'73.0.3683.60',
'74.0.3720.5',
'72.0.3626.121',
'74.0.3718.8',
'74.0.3720.4',
'74.0.3720.3',
'74.0.3718.7',
'74.0.3720.2',
'74.0.3720.1',
'74.0.3720.0',
'74.0.3718.6',
'74.0.3719.5',
'73.0.3683.59',
'74.0.3718.5',
'74.0.3718.4',
'74.0.3719.4',
'74.0.3719.3',
'74.0.3719.2',
'74.0.3719.1',
'73.0.3683.58',
'74.0.3719.0',
'73.0.3683.57',
'73.0.3683.56',
'74.0.3718.3',
'73.0.3683.55',
'74.0.3718.2',
'74.0.3718.1',
'74.0.3718.0',
'73.0.3683.54',
'74.0.3717.2',
'73.0.3683.53',
'74.0.3717.1',
'74.0.3717.0',
'73.0.3683.52',
'74.0.3716.1',
'74.0.3716.0',
'73.0.3683.51',
'74.0.3715.1',
'74.0.3715.0',
'73.0.3683.50',
'74.0.3711.2',
'74.0.3714.2',
'74.0.3713.3',
'74.0.3714.1',
'74.0.3714.0',
'73.0.3683.49',
'74.0.3713.1',
'74.0.3713.0',
'72.0.3626.120',
'73.0.3683.48',
'74.0.3712.2',
'74.0.3712.1',
'74.0.3712.0',
'73.0.3683.47',
'72.0.3626.119',
'73.0.3683.46',
'74.0.3710.2',
'72.0.3626.118',
'74.0.3711.1',
'74.0.3711.0',
'73.0.3683.45',
'72.0.3626.117',
'74.0.3710.1',
'74.0.3710.0',
'73.0.3683.44',
'72.0.3626.116',
'74.0.3709.1',
'74.0.3709.0',
'74.0.3704.9',
'73.0.3683.43',
'72.0.3626.115',
'74.0.3704.8',
'74.0.3704.7',
'74.0.3708.0',
'74.0.3706.7',
'74.0.3704.6',
'73.0.3683.42',
'72.0.3626.114',
'74.0.3706.6',
'72.0.3626.113',
'74.0.3704.5',
'74.0.3706.5',
'74.0.3706.4',
'74.0.3706.3',
'74.0.3706.2',
'74.0.3706.1',
'74.0.3706.0',
'73.0.3683.41',
'72.0.3626.112',
'74.0.3705.1',
'74.0.3705.0',
'73.0.3683.40',
'72.0.3626.111',
'73.0.3683.39',
'74.0.3704.4',
'73.0.3683.38',
'74.0.3704.3',
'74.0.3704.2',
'74.0.3704.1',
'74.0.3704.0',
'73.0.3683.37',
'72.0.3626.110',
'72.0.3626.109',
'74.0.3703.3',
'74.0.3703.2',
'73.0.3683.36',
'74.0.3703.1',
'74.0.3703.0',
'73.0.3683.35',
'72.0.3626.108',
'74.0.3702.2',
'74.0.3699.3',
'74.0.3702.1',
'74.0.3702.0',
'73.0.3683.34',
'72.0.3626.107',
'73.0.3683.33',
'74.0.3701.1',
'74.0.3701.0',
'73.0.3683.32',
'73.0.3683.31',
'72.0.3626.105',
'74.0.3700.1',
'74.0.3700.0',
'73.0.3683.29',
'72.0.3626.103',
'74.0.3699.2',
'74.0.3699.1',
'74.0.3699.0',
'73.0.3683.28',
'72.0.3626.102',
'73.0.3683.27',
'73.0.3683.26',
'74.0.3698.0',
'74.0.3696.2',
'72.0.3626.101',
'73.0.3683.25',
'74.0.3696.1',
'74.0.3696.0',
'74.0.3694.8',
'72.0.3626.100',
'74.0.3694.7',
'74.0.3694.6',
'74.0.3694.5',
'74.0.3694.4',
'72.0.3626.99',
'72.0.3626.98',
'74.0.3694.3',
'73.0.3683.24',
'72.0.3626.97',
'72.0.3626.96',
'72.0.3626.95',
'73.0.3683.23',
'72.0.3626.94',
'73.0.3683.22',
'73.0.3683.21',
'72.0.3626.93',
'74.0.3694.2',
'72.0.3626.92',
'74.0.3694.1',
'74.0.3694.0',
'74.0.3693.6',
'73.0.3683.20',
'72.0.3626.91',
'74.0.3693.5',
'74.0.3693.4',
'74.0.3693.3',
'74.0.3693.2',
'73.0.3683.19',
'74.0.3693.1',
'74.0.3693.0',
'73.0.3683.18',
'72.0.3626.90',
'74.0.3692.1',
'74.0.3692.0',
'73.0.3683.17',
'72.0.3626.89',
'74.0.3687.3',
'74.0.3691.1',
'74.0.3691.0',
'73.0.3683.16',
'72.0.3626.88',
'72.0.3626.87',
'73.0.3683.15',
'74.0.3690.1',
'74.0.3690.0',
'73.0.3683.14',
'72.0.3626.86',
'73.0.3683.13',
'73.0.3683.12',
'74.0.3689.1',
'74.0.3689.0',
'73.0.3683.11',
'72.0.3626.85',
'73.0.3683.10',
'72.0.3626.84',
'73.0.3683.9',
'74.0.3688.1',
'74.0.3688.0',
'73.0.3683.8',
'72.0.3626.83',
'74.0.3687.2',
'74.0.3687.1',
'74.0.3687.0',
'73.0.3683.7',
'72.0.3626.82',
'74.0.3686.4',
'72.0.3626.81',
'74.0.3686.3',
'74.0.3686.2',
'74.0.3686.1',
'74.0.3686.0',
'73.0.3683.6',
'72.0.3626.80',
'74.0.3685.1',
'74.0.3685.0',
'73.0.3683.5',
'72.0.3626.79',
'74.0.3684.1',
'74.0.3684.0',
'73.0.3683.4',
'72.0.3626.78',
'72.0.3626.77',
'73.0.3683.3',
'73.0.3683.2',
'72.0.3626.76',
'73.0.3683.1',
'73.0.3683.0',
'72.0.3626.75',
'71.0.3578.141',
'73.0.3682.1',
'73.0.3682.0',
'72.0.3626.74',
'71.0.3578.140',
'73.0.3681.4',
'73.0.3681.3',
'73.0.3681.2',
'73.0.3681.1',
'73.0.3681.0',
'72.0.3626.73',
'71.0.3578.139',
'72.0.3626.72',
'72.0.3626.71',
'73.0.3680.1',
'73.0.3680.0',
'72.0.3626.70',
'71.0.3578.138',
'73.0.3678.2',
'73.0.3679.1',
'73.0.3679.0',
'72.0.3626.69',
'71.0.3578.137',
'73.0.3678.1',
'73.0.3678.0',
'71.0.3578.136',
'73.0.3677.1',
'73.0.3677.0',
'72.0.3626.68',
'72.0.3626.67',
'71.0.3578.135',
'73.0.3676.1',
'73.0.3676.0',
'73.0.3674.2',
'72.0.3626.66',
'71.0.3578.134',
'73.0.3674.1',
'73.0.3674.0',
'72.0.3626.65',
'71.0.3578.133',
'73.0.3673.2',
'73.0.3673.1',
'73.0.3673.0',
'72.0.3626.64',
'71.0.3578.132',
'72.0.3626.63',
'72.0.3626.62',
'72.0.3626.61',
'72.0.3626.60',
'73.0.3672.1',
'73.0.3672.0',
'72.0.3626.59',
'71.0.3578.131',
'73.0.3671.3',
'73.0.3671.2',
'73.0.3671.1',
'73.0.3671.0',
'72.0.3626.58',
'71.0.3578.130',
'73.0.3670.1',
'73.0.3670.0',
'72.0.3626.57',
'71.0.3578.129',
'73.0.3669.1',
'73.0.3669.0',
'72.0.3626.56',
'71.0.3578.128',
'73.0.3668.2',
'73.0.3668.1',
'73.0.3668.0',
'72.0.3626.55',
'71.0.3578.127',
'73.0.3667.2',
'73.0.3667.1',
'73.0.3667.0',
'72.0.3626.54',
'71.0.3578.126',
'73.0.3666.1',
'73.0.3666.0',
'72.0.3626.53',
'71.0.3578.125',
'73.0.3665.4',
'73.0.3665.3',
'72.0.3626.52',
'73.0.3665.2',
'73.0.3664.4',
'73.0.3665.1',
'73.0.3665.0',
'72.0.3626.51',
'71.0.3578.124',
'72.0.3626.50',
'73.0.3664.3',
'73.0.3664.2',
'73.0.3664.1',
'73.0.3664.0',
'73.0.3663.2',
'72.0.3626.49',
'71.0.3578.123',
'73.0.3663.1',
'73.0.3663.0',
'72.0.3626.48',
'71.0.3578.122',
'73.0.3662.1',
'73.0.3662.0',
'72.0.3626.47',
'71.0.3578.121',
'73.0.3661.1',
'72.0.3626.46',
'73.0.3661.0',
'72.0.3626.45',
'71.0.3578.120',
'73.0.3660.2',
'73.0.3660.1',
'73.0.3660.0',
'72.0.3626.44',
'71.0.3578.119',
'73.0.3659.1',
'73.0.3659.0',
'72.0.3626.43',
'71.0.3578.118',
'73.0.3658.1',
'73.0.3658.0',
'72.0.3626.42',
'71.0.3578.117',
'73.0.3657.1',
'73.0.3657.0',
'72.0.3626.41',
'71.0.3578.116',
'73.0.3656.1',
'73.0.3656.0',
'72.0.3626.40',
'71.0.3578.115',
'73.0.3655.1',
'73.0.3655.0',
'72.0.3626.39',
'71.0.3578.114',
'73.0.3654.1',
'73.0.3654.0',
'72.0.3626.38',
'71.0.3578.113',
'73.0.3653.1',
'73.0.3653.0',
'72.0.3626.37',
'71.0.3578.112',
'73.0.3652.1',
'73.0.3652.0',
'72.0.3626.36',
'71.0.3578.111',
'73.0.3651.1',
'73.0.3651.0',
'72.0.3626.35',
'71.0.3578.110',
'73.0.3650.1',
'73.0.3650.0',
'72.0.3626.34',
'71.0.3578.109',
'73.0.3649.1',
'73.0.3649.0',
'72.0.3626.33',
'71.0.3578.108',
'73.0.3648.2',
'73.0.3648.1',
'73.0.3648.0',
'72.0.3626.32',
'71.0.3578.107',
'73.0.3647.2',
'73.0.3647.1',
'73.0.3647.0',
'72.0.3626.31',
'71.0.3578.106',
'73.0.3635.3',
'73.0.3646.2',
'73.0.3646.1',
'73.0.3646.0',
'72.0.3626.30',
'71.0.3578.105',
'72.0.3626.29',
'73.0.3645.2',
'73.0.3645.1',
'73.0.3645.0',
'72.0.3626.28',
'71.0.3578.104',
'72.0.3626.27',
'72.0.3626.26',
'72.0.3626.25',
'72.0.3626.24',
'73.0.3644.0',
'73.0.3643.2',
'72.0.3626.23',
'71.0.3578.103',
'73.0.3643.1',
'73.0.3643.0',
'72.0.3626.22',
'71.0.3578.102',
'73.0.3642.1',
'73.0.3642.0',
'72.0.3626.21',
'71.0.3578.101',
'73.0.3641.1',
'73.0.3641.0',
'72.0.3626.20',
'71.0.3578.100',
'72.0.3626.19',
'73.0.3640.1',
'73.0.3640.0',
'72.0.3626.18',
'73.0.3639.1',
'71.0.3578.99',
'73.0.3639.0',
'72.0.3626.17',
'73.0.3638.2',
'72.0.3626.16',
'73.0.3638.1',
'73.0.3638.0',
'72.0.3626.15',
'71.0.3578.98',
'73.0.3635.2',
'71.0.3578.97',
'73.0.3637.1',
'73.0.3637.0',
'72.0.3626.14',
'71.0.3578.96',
'71.0.3578.95',
'72.0.3626.13',
'71.0.3578.94',
'73.0.3636.2',
'71.0.3578.93',
'73.0.3636.1',
'73.0.3636.0',
'72.0.3626.12',
'71.0.3578.92',
'73.0.3635.1',
'73.0.3635.0',
'72.0.3626.11',
'71.0.3578.91',
'73.0.3634.2',
'73.0.3634.1',
'73.0.3634.0',
'72.0.3626.10',
'71.0.3578.90',
'71.0.3578.89',
'73.0.3633.2',
'73.0.3633.1',
'73.0.3633.0',
'72.0.3610.4',
'72.0.3626.9',
'71.0.3578.88',
'73.0.3632.5',
'73.0.3632.4',
'73.0.3632.3',
'73.0.3632.2',
'73.0.3632.1',
'73.0.3632.0',
'72.0.3626.8',
'71.0.3578.87',
'73.0.3631.2',
'73.0.3631.1',
'73.0.3631.0',
'72.0.3626.7',
'71.0.3578.86',
'72.0.3626.6',
'73.0.3630.1',
'73.0.3630.0',
'72.0.3626.5',
'71.0.3578.85',
'72.0.3626.4',
'73.0.3628.3',
'73.0.3628.2',
'73.0.3629.1',
'73.0.3629.0',
'72.0.3626.3',
'71.0.3578.84',
'73.0.3628.1',
'73.0.3628.0',
'71.0.3578.83',
'73.0.3627.1',
'73.0.3627.0',
'72.0.3626.2',
'71.0.3578.82',
'71.0.3578.81',
'71.0.3578.80',
'72.0.3626.1',
'72.0.3626.0',
'71.0.3578.79',
'70.0.3538.124',
'71.0.3578.78',
'72.0.3623.4',
'72.0.3625.2',
'72.0.3625.1',
'72.0.3625.0',
'71.0.3578.77',
'70.0.3538.123',
'72.0.3624.4',
'72.0.3624.3',
'72.0.3624.2',
'71.0.3578.76',
'72.0.3624.1',
'72.0.3624.0',
'72.0.3623.3',
'71.0.3578.75',
'70.0.3538.122',
'71.0.3578.74',
'72.0.3623.2',
'72.0.3610.3',
'72.0.3623.1',
'72.0.3623.0',
'72.0.3622.3',
'72.0.3622.2',
'71.0.3578.73',
'70.0.3538.121',
'72.0.3622.1',
'72.0.3622.0',
'71.0.3578.72',
'70.0.3538.120',
'72.0.3621.1',
'72.0.3621.0',
'71.0.3578.71',
'70.0.3538.119',
'72.0.3620.1',
'72.0.3620.0',
'71.0.3578.70',
'70.0.3538.118',
'71.0.3578.69',
'72.0.3619.1',
'72.0.3619.0',
'71.0.3578.68',
'70.0.3538.117',
'71.0.3578.67',
'72.0.3618.1',
'72.0.3618.0',
'71.0.3578.66',
'70.0.3538.116',
'72.0.3617.1',
'72.0.3617.0',
'71.0.3578.65',
'70.0.3538.115',
'72.0.3602.3',
'71.0.3578.64',
'72.0.3616.1',
'72.0.3616.0',
'71.0.3578.63',
'70.0.3538.114',
'71.0.3578.62',
'72.0.3615.1',
'72.0.3615.0',
'71.0.3578.61',
'70.0.3538.113',
'72.0.3614.1',
'72.0.3614.0',
'71.0.3578.60',
'70.0.3538.112',
'72.0.3613.1',
'72.0.3613.0',
'71.0.3578.59',
'70.0.3538.111',
'72.0.3612.2',
'72.0.3612.1',
'72.0.3612.0',
'70.0.3538.110',
'71.0.3578.58',
'70.0.3538.109',
'72.0.3611.2',
'72.0.3611.1',
'72.0.3611.0',
'71.0.3578.57',
'70.0.3538.108',
'72.0.3610.2',
'71.0.3578.56',
'71.0.3578.55',
'72.0.3610.1',
'72.0.3610.0',
'71.0.3578.54',
'70.0.3538.107',
'71.0.3578.53',
'72.0.3609.3',
'71.0.3578.52',
'72.0.3609.2',
'71.0.3578.51',
'72.0.3608.5',
'72.0.3609.1',
'72.0.3609.0',
'71.0.3578.50',
'70.0.3538.106',
'72.0.3608.4',
'72.0.3608.3',
'72.0.3608.2',
'71.0.3578.49',
'72.0.3608.1',
'72.0.3608.0',
'70.0.3538.105',
'71.0.3578.48',
'72.0.3607.1',
'72.0.3607.0',
'71.0.3578.47',
'70.0.3538.104',
'72.0.3606.2',
'72.0.3606.1',
'72.0.3606.0',
'71.0.3578.46',
'70.0.3538.103',
'70.0.3538.102',
'72.0.3605.3',
'72.0.3605.2',
'72.0.3605.1',
'72.0.3605.0',
'71.0.3578.45',
'70.0.3538.101',
'71.0.3578.44',
'71.0.3578.43',
'70.0.3538.100',
'70.0.3538.99',
'71.0.3578.42',
'72.0.3604.1',
'72.0.3604.0',
'71.0.3578.41',
'70.0.3538.98',
'71.0.3578.40',
'72.0.3603.2',
'72.0.3603.1',
'72.0.3603.0',
'71.0.3578.39',
'70.0.3538.97',
'72.0.3602.2',
'71.0.3578.38',
'71.0.3578.37',
'72.0.3602.1',
'72.0.3602.0',
'71.0.3578.36',
'70.0.3538.96',
'72.0.3601.1',
'72.0.3601.0',
'71.0.3578.35',
'70.0.3538.95',
'72.0.3600.1',
'72.0.3600.0',
'71.0.3578.34',
'70.0.3538.94',
'72.0.3599.3',
'72.0.3599.2',
'72.0.3599.1',
'72.0.3599.0',
'71.0.3578.33',
'70.0.3538.93',
'72.0.3598.1',
'72.0.3598.0',
'71.0.3578.32',
'70.0.3538.87',
'72.0.3597.1',
'72.0.3597.0',
'72.0.3596.2',
'71.0.3578.31',
'70.0.3538.86',
'71.0.3578.30',
'71.0.3578.29',
'72.0.3596.1',
'72.0.3596.0',
'71.0.3578.28',
'70.0.3538.85',
'72.0.3595.2',
'72.0.3591.3',
'72.0.3595.1',
'72.0.3595.0',
'71.0.3578.27',
'70.0.3538.84',
'72.0.3594.1',
'72.0.3594.0',
'71.0.3578.26',
'70.0.3538.83',
'72.0.3593.2',
'72.0.3593.1',
'72.0.3593.0',
'71.0.3578.25',
'70.0.3538.82',
'72.0.3589.3',
'72.0.3592.2',
'72.0.3592.1',
'72.0.3592.0',
'71.0.3578.24',
'72.0.3589.2',
'70.0.3538.81',
'70.0.3538.80',
'72.0.3591.2',
'72.0.3591.1',
'72.0.3591.0',
'71.0.3578.23',
'70.0.3538.79',
'71.0.3578.22',
'72.0.3590.1',
'72.0.3590.0',
'71.0.3578.21',
'70.0.3538.78',
'70.0.3538.77',
'72.0.3589.1',
'72.0.3589.0',
'71.0.3578.20',
'70.0.3538.76',
'71.0.3578.19',
'70.0.3538.75',
'72.0.3588.1',
'72.0.3588.0',
'71.0.3578.18',
'70.0.3538.74',
'72.0.3586.2',
'72.0.3587.0',
'71.0.3578.17',
'70.0.3538.73',
'72.0.3586.1',
'72.0.3586.0',
'71.0.3578.16',
'70.0.3538.72',
'72.0.3585.1',
'72.0.3585.0',
'71.0.3578.15',
'70.0.3538.71',
'71.0.3578.14',
'72.0.3584.1',
'72.0.3584.0',
'71.0.3578.13',
'70.0.3538.70',
'72.0.3583.2',
'71.0.3578.12',
'72.0.3583.1',
'72.0.3583.0',
'71.0.3578.11',
'70.0.3538.69',
'71.0.3578.10',
'72.0.3582.0',
'72.0.3581.4',
'71.0.3578.9',
'70.0.3538.67',
'72.0.3581.3',
'72.0.3581.2',
'72.0.3581.1',
'72.0.3581.0',
'71.0.3578.8',
'70.0.3538.66',
'72.0.3580.1',
'72.0.3580.0',
'71.0.3578.7',
'70.0.3538.65',
'71.0.3578.6',
'72.0.3579.1',
'72.0.3579.0',
'71.0.3578.5',
'70.0.3538.64',
'71.0.3578.4',
'71.0.3578.3',
'71.0.3578.2',
'71.0.3578.1',
'71.0.3578.0',
'70.0.3538.63',
'69.0.3497.128',
'70.0.3538.62',
'70.0.3538.61',
'70.0.3538.60',
'70.0.3538.59',
'71.0.3577.1',
'71.0.3577.0',
'70.0.3538.58',
'69.0.3497.127',
'71.0.3576.2',
'71.0.3576.1',
'71.0.3576.0',
'70.0.3538.57',
'70.0.3538.56',
'71.0.3575.2',
'70.0.3538.55',
'69.0.3497.126',
'70.0.3538.54',
'71.0.3575.1',
'71.0.3575.0',
'71.0.3574.1',
'71.0.3574.0',
'70.0.3538.53',
'69.0.3497.125',
'70.0.3538.52',
'71.0.3573.1',
'71.0.3573.0',
'70.0.3538.51',
'69.0.3497.124',
'71.0.3572.1',
'71.0.3572.0',
'70.0.3538.50',
'69.0.3497.123',
'71.0.3571.2',
'70.0.3538.49',
'69.0.3497.122',
'71.0.3571.1',
'71.0.3571.0',
'70.0.3538.48',
'69.0.3497.121',
'71.0.3570.1',
'71.0.3570.0',
'70.0.3538.47',
'69.0.3497.120',
'71.0.3568.2',
'71.0.3569.1',
'71.0.3569.0',
'70.0.3538.46',
'69.0.3497.119',
'70.0.3538.45',
'71.0.3568.1',
'71.0.3568.0',
'70.0.3538.44',
'69.0.3497.118',
'70.0.3538.43',
'70.0.3538.42',
'71.0.3567.1',
'71.0.3567.0',
'70.0.3538.41',
'69.0.3497.117',
'71.0.3566.1',
'71.0.3566.0',
'70.0.3538.40',
'69.0.3497.116',
'71.0.3565.1',
'71.0.3565.0',
'70.0.3538.39',
'69.0.3497.115',
'71.0.3564.1',
'71.0.3564.0',
'70.0.3538.38',
'69.0.3497.114',
'71.0.3563.0',
'71.0.3562.2',
'70.0.3538.37',
'69.0.3497.113',
'70.0.3538.36',
'70.0.3538.35',
'71.0.3562.1',
'71.0.3562.0',
'70.0.3538.34',
'69.0.3497.112',
'70.0.3538.33',
'71.0.3561.1',
'71.0.3561.0',
'70.0.3538.32',
'69.0.3497.111',
'71.0.3559.6',
'71.0.3560.1',
'71.0.3560.0',
'71.0.3559.5',
'71.0.3559.4',
'70.0.3538.31',
'69.0.3497.110',
'71.0.3559.3',
'70.0.3538.30',
'69.0.3497.109',
'71.0.3559.2',
'71.0.3559.1',
'71.0.3559.0',
'70.0.3538.29',
'69.0.3497.108',
'71.0.3558.2',
'71.0.3558.1',
'71.0.3558.0',
'70.0.3538.28',
'69.0.3497.107',
'71.0.3557.2',
'71.0.3557.1',
'71.0.3557.0',
'70.0.3538.27',
'69.0.3497.106',
'71.0.3554.4',
'70.0.3538.26',
'71.0.3556.1',
'71.0.3556.0',
'70.0.3538.25',
'71.0.3554.3',
'69.0.3497.105',
'71.0.3554.2',
'70.0.3538.24',
'69.0.3497.104',
'71.0.3555.2',
'70.0.3538.23',
'71.0.3555.1',
'71.0.3555.0',
'70.0.3538.22',
'69.0.3497.103',
'71.0.3554.1',
'71.0.3554.0',
'70.0.3538.21',
'69.0.3497.102',
'71.0.3553.3',
'70.0.3538.20',
'69.0.3497.101',
'71.0.3553.2',
'69.0.3497.100',
'71.0.3553.1',
'71.0.3553.0',
'70.0.3538.19',
'69.0.3497.99',
'69.0.3497.98',
'69.0.3497.97',
'71.0.3552.6',
'71.0.3552.5',
'71.0.3552.4',
'71.0.3552.3',
'71.0.3552.2',
'71.0.3552.1',
'71.0.3552.0',
'70.0.3538.18',
'69.0.3497.96',
'71.0.3551.3',
'71.0.3551.2',
'71.0.3551.1',
'71.0.3551.0',
'70.0.3538.17',
'69.0.3497.95',
'71.0.3550.3',
'71.0.3550.2',
'71.0.3550.1',
'71.0.3550.0',
'70.0.3538.16',
'69.0.3497.94',
'71.0.3549.1',
'71.0.3549.0',
'70.0.3538.15',
'69.0.3497.93',
'69.0.3497.92',
'71.0.3548.1',
'71.0.3548.0',
'70.0.3538.14',
'69.0.3497.91',
'71.0.3547.1',
'71.0.3547.0',
'70.0.3538.13',
'69.0.3497.90',
'71.0.3546.2',
'69.0.3497.89',
'71.0.3546.1',
'71.0.3546.0',
'70.0.3538.12',
'69.0.3497.88',
'71.0.3545.4',
'71.0.3545.3',
'71.0.3545.2',
'71.0.3545.1',
'71.0.3545.0',
'70.0.3538.11',
'69.0.3497.87',
'71.0.3544.5',
'71.0.3544.4',
'71.0.3544.3',
'71.0.3544.2',
'71.0.3544.1',
'71.0.3544.0',
'69.0.3497.86',
'70.0.3538.10',
'69.0.3497.85',
'70.0.3538.9',
'69.0.3497.84',
'71.0.3543.4',
'70.0.3538.8',
'71.0.3543.3',
'71.0.3543.2',
'71.0.3543.1',
'71.0.3543.0',
'70.0.3538.7',
'69.0.3497.83',
'71.0.3542.2',
'71.0.3542.1',
'71.0.3542.0',
'70.0.3538.6',
'69.0.3497.82',
'69.0.3497.81',
'71.0.3541.1',
'71.0.3541.0',
'70.0.3538.5',
'69.0.3497.80',
'71.0.3540.1',
'71.0.3540.0',
'70.0.3538.4',
'69.0.3497.79',
'70.0.3538.3',
'71.0.3539.1',
'71.0.3539.0',
'69.0.3497.78',
'68.0.3440.134',
'69.0.3497.77',
'70.0.3538.2',
'70.0.3538.1',
'70.0.3538.0',
'69.0.3497.76',
'68.0.3440.133',
'69.0.3497.75',
'70.0.3537.2',
'70.0.3537.1',
'70.0.3537.0',
'69.0.3497.74',
'68.0.3440.132',
'70.0.3536.0',
'70.0.3535.5',
'70.0.3535.4',
'70.0.3535.3',
'69.0.3497.73',
'68.0.3440.131',
'70.0.3532.8',
'70.0.3532.7',
'69.0.3497.72',
'69.0.3497.71',
'70.0.3535.2',
'70.0.3535.1',
'70.0.3535.0',
'69.0.3497.70',
'68.0.3440.130',
'69.0.3497.69',
'68.0.3440.129',
'70.0.3534.4',
'70.0.3534.3',
'70.0.3534.2',
'70.0.3534.1',
'70.0.3534.0',
'69.0.3497.68',
'68.0.3440.128',
'70.0.3533.2',
'70.0.3533.1',
'70.0.3533.0',
'69.0.3497.67',
'68.0.3440.127',
'70.0.3532.6',
'70.0.3532.5',
'70.0.3532.4',
'69.0.3497.66',
'68.0.3440.126',
'70.0.3532.3',
'70.0.3532.2',
'70.0.3532.1',
'69.0.3497.60',
'69.0.3497.65',
'69.0.3497.64',
'70.0.3532.0',
'70.0.3531.0',
'70.0.3530.4',
'70.0.3530.3',
'70.0.3530.2',
'69.0.3497.58',
'68.0.3440.125',
'69.0.3497.57',
'69.0.3497.56',
'69.0.3497.55',
'69.0.3497.54',
'70.0.3530.1',
'70.0.3530.0',
'69.0.3497.53',
'68.0.3440.124',
'69.0.3497.52',
'70.0.3529.3',
'70.0.3529.2',
'70.0.3529.1',
'70.0.3529.0',
'69.0.3497.51',
'70.0.3528.4',
'68.0.3440.123',
'70.0.3528.3',
'70.0.3528.2',
'70.0.3528.1',
'70.0.3528.0',
'69.0.3497.50',
'68.0.3440.122',
'70.0.3527.1',
'70.0.3527.0',
'69.0.3497.49',
'68.0.3440.121',
'70.0.3526.1',
'70.0.3526.0',
'68.0.3440.120',
'69.0.3497.48',
'69.0.3497.47',
'68.0.3440.119',
'68.0.3440.118',
'70.0.3525.5',
'70.0.3525.4',
'70.0.3525.3',
'68.0.3440.117',
'69.0.3497.46',
'70.0.3525.2',
'70.0.3525.1',
'70.0.3525.0',
'69.0.3497.45',
'68.0.3440.116',
'70.0.3524.4',
'70.0.3524.3',
'69.0.3497.44',
'70.0.3524.2',
'70.0.3524.1',
'70.0.3524.0',
'70.0.3523.2',
'69.0.3497.43',
'68.0.3440.115',
'70.0.3505.9',
'69.0.3497.42',
'70.0.3505.8',
'70.0.3523.1',
'70.0.3523.0',
'69.0.3497.41',
'68.0.3440.114',
'70.0.3505.7',
'69.0.3497.40',
'70.0.3522.1',
'70.0.3522.0',
'70.0.3521.2',
'69.0.3497.39',
'68.0.3440.113',
'70.0.3505.6',
'70.0.3521.1',
'70.0.3521.0',
'69.0.3497.38',
'68.0.3440.112',
'70.0.3520.1',
'70.0.3520.0',
'69.0.3497.37',
'68.0.3440.111',
'70.0.3519.3',
'70.0.3519.2',
'70.0.3519.1',
'70.0.3519.0',
'69.0.3497.36',
'68.0.3440.110',
'70.0.3518.1',
'70.0.3518.0',
'69.0.3497.35',
'69.0.3497.34',
'68.0.3440.109',
'70.0.3517.1',
'70.0.3517.0',
'69.0.3497.33',
'68.0.3440.108',
'69.0.3497.32',
'70.0.3516.3',
'70.0.3516.2',
'70.0.3516.1',
'70.0.3516.0',
'69.0.3497.31',
'68.0.3440.107',
'70.0.3515.4',
'68.0.3440.106',
'70.0.3515.3',
'70.0.3515.2',
'70.0.3515.1',
'70.0.3515.0',
'69.0.3497.30',
'68.0.3440.105',
'68.0.3440.104',
'70.0.3514.2',
'70.0.3514.1',
'70.0.3514.0',
'69.0.3497.29',
'68.0.3440.103',
'70.0.3513.1',
'70.0.3513.0',
'69.0.3497.28',
)
return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)
std_headers = {
'User-Agent': random_user_agent(),
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-us,en;q=0.5',
}
USER_AGENTS = {
'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}
NO_DEFAULT = object()
ENGLISH_MONTH_NAMES = [
'January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
MONTH_NAMES = {
'en': ENGLISH_MONTH_NAMES,
'fr': [
'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}
KNOWN_EXTENSIONS = (
'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
'flv', 'f4v', 'f4a', 'f4b',
'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
'mkv', 'mka', 'mk3d',
'avi', 'divx',
'mov',
'asf', 'wmv', 'wma',
'3gp', '3g2',
'mp3',
'flac',
'ape',
'wav',
'f4f', 'f4m', 'm3u8', 'smil')
# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))
DATE_FORMATS = (
'%d %B %Y',
'%d %b %Y',
'%B %d %Y',
'%B %dst %Y',
'%B %dnd %Y',
'%B %drd %Y',
'%B %dth %Y',
'%b %d %Y',
'%b %dst %Y',
'%b %dnd %Y',
'%b %drd %Y',
'%b %dth %Y',
'%b %dst %Y %I:%M',
'%b %dnd %Y %I:%M',
'%b %drd %Y %I:%M',
'%b %dth %Y %I:%M',
'%Y %m %d',
'%Y-%m-%d',
'%Y.%m.%d.',
'%Y/%m/%d',
'%Y/%m/%d %H:%M',
'%Y/%m/%d %H:%M:%S',
'%Y%m%d%H%M',
'%Y%m%d%H%M%S',
'%Y-%m-%d %H:%M',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M:%S:%f',
'%d.%m.%Y %H:%M',
'%d.%m.%Y %H.%M',
'%Y-%m-%dT%H:%M:%SZ',
'%Y-%m-%dT%H:%M:%S.%fZ',
'%Y-%m-%dT%H:%M:%S.%f0Z',
'%Y-%m-%dT%H:%M:%S',
'%Y-%m-%dT%H:%M:%S.%f',
'%Y-%m-%dT%H:%M',
'%b %d %Y at %H:%M',
'%b %d %Y at %H:%M:%S',
'%B %d %Y at %H:%M',
'%B %d %Y at %H:%M:%S',
'%H:%M %d-%b-%Y',
)
DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
'%d-%m-%Y',
'%d.%m.%Y',
'%d.%m.%y',
'%d/%m/%Y',
'%d/%m/%y',
'%d/%m/%Y %H:%M:%S',
])
DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
'%m-%d-%Y',
'%m.%d.%Y',
'%m/%d/%Y',
'%m/%d/%y',
'%m/%d/%Y %H:%M:%S',
])
PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'
def preferredencoding():
"""Get preferred encoding.
Returns the best encoding scheme for the system, based on
locale.getpreferredencoding() and some further tweaks.
"""
try:
pref = locale.getpreferredencoding()
'TEST'.encode(pref)
except Exception:
pref = 'UTF-8'
return pref
def write_json_file(obj, fn):
""" Encode obj as JSON and write it to fn, atomically if possible """
fn = encodeFilename(fn)
if sys.version_info < (3, 0) and sys.platform != 'win32':
encoding = get_filesystem_encoding()
# os.path.basename returns a bytes object, but NamedTemporaryFile
# will fail if the filename contains non ascii characters unless we
# use a unicode object
path_basename = lambda f: os.path.basename(fn).decode(encoding)
# the same for os.path.dirname
path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
else:
path_basename = os.path.basename
path_dirname = os.path.dirname
args = {
'suffix': '.tmp',
'prefix': path_basename(fn) + '.',
'dir': path_dirname(fn),
'delete': False,
}
# In Python 2.x, json.dump expects a bytestream.
# In Python 3.x, it writes to a character stream
if sys.version_info < (3, 0):
args['mode'] = 'wb'
else:
args.update({
'mode': 'w',
'encoding': 'utf-8',
})
tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))
try:
with tf:
json.dump(obj, tf)
if sys.platform == 'win32':
# Need to remove existing file on Windows, else os.rename raises
# WindowsError or FileExistsError.
try:
os.unlink(fn)
except OSError:
pass
try:
mask = os.umask(0)
os.umask(mask)
os.chmod(tf.name, 0o666 & ~mask)
except OSError:
pass
os.rename(tf.name, fn)
except Exception:
try:
os.remove(tf.name)
except OSError:
pass
raise
if sys.version_info >= (2, 7):
def find_xpath_attr(node, xpath, key, val=None):
""" Find the xpath xpath[@key=val] """
assert re.match(r'^[a-zA-Z_-]+$', key)
expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
return node.find(expr)
else:
def find_xpath_attr(node, xpath, key, val=None):
for f in node.findall(compat_xpath(xpath)):
if key not in f.attrib:
continue
if val is None or f.attrib.get(key) == val:
return f
return None
# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter
def xpath_with_ns(path, ns_map):
components = [c.split(':') for c in path.split('/')]
replaced = []
for c in components:
if len(c) == 1:
replaced.append(c[0])
else:
ns, tag = c
replaced.append('{%s}%s' % (ns_map[ns], tag))
return '/'.join(replaced)
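# Illustrative example (hypothetical namespace map):
#   xpath_with_ns('media:song/media:author', {'media': 'http://example.com/'})
# returns '{http://example.com/}song/{http://example.com/}author'.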
def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
def _find_xpath(xpath):
return node.find(compat_xpath(xpath))
if isinstance(xpath, (str, compat_str)):
n = _find_xpath(xpath)
else:
for xp in xpath:
n = _find_xpath(xp)
if n is not None:
break
if n is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = xpath if name is None else name
raise ExtractorError('Could not find XML element %s' % name)
else:
return None
return n
def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
n = xpath_element(node, xpath, name, fatal=fatal, default=default)
if n is None or n == default:
return n
if n.text is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = xpath if name is None else name
raise ExtractorError('Could not find XML element\'s text %s' % name)
else:
return None
return n.text
def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
n = find_xpath_attr(node, xpath, key)
if n is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = '%s[@%s]' % (xpath, key) if name is None else name
raise ExtractorError('Could not find XML attribute %s' % name)
else:
return None
return n.attrib[key]
def get_element_by_id(id, html):
"""Return the content of the tag with the specified ID in the passed HTML document"""
return get_element_by_attribute('id', id, html)
def get_element_by_class(class_name, html):
"""Return the content of the first tag with the specified class in the passed HTML document"""
retval = get_elements_by_class(class_name, html)
return retval[0] if retval else None
def get_element_by_attribute(attribute, value, html, escape_value=True):
retval = get_elements_by_attribute(attribute, value, html, escape_value)
return retval[0] if retval else None
def get_elements_by_class(class_name, html):
"""Return the content of all tags with the specified class in the passed HTML document as a list"""
return get_elements_by_attribute(
'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
html, escape_value=False)
def get_elements_by_attribute(attribute, value, html, escape_value=True):
"""Return the content of the tag with the specified attribute in the passed HTML document"""
value = re.escape(value) if escape_value else value
retlist = []
for m in re.finditer(r'''(?xs)
<([a-zA-Z0-9:._-]+)
(?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
\s+%s=['"]?%s['"]?
(?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
\s*>
(?P<content>.*?)
</\1>
''' % (re.escape(attribute), value), html):
res = m.group('content')
if res.startswith('"') or res.startswith("'"):
res = res[1:-1]
retlist.append(unescapeHTML(res))
return retlist
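# Illustrative usage of the helpers above (hypothetical markup, not from any
# extractor): get_element_by_class('foo', '<div class="foo bar">some text</div>')
# returns 'some text'; the class pattern only needs to match one of the
# whitespace-separated class names.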
class HTMLAttributeParser(compat_HTMLParser):
"""Trivial HTML parser to gather the attributes for a single element"""
def __init__(self):
self.attrs = {}
compat_HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
self.attrs = dict(attrs)
class HTMLListAttrsParser(compat_HTMLParser):
"""HTML parser to gather the attributes for the elements of a list"""
def __init__(self):
compat_HTMLParser.__init__(self)
self.items = []
self._level = 0
def handle_starttag(self, tag, attrs):
if tag == 'li' and self._level == 0:
self.items.append(dict(attrs))
self._level += 1
def handle_endtag(self, tag):
self._level -= 1
def extract_attributes(html_element):
"""Given a string for an HTML element such as
<el
a="foo" B="bar" c="&98;az" d=boz
empty= noval entity="&"
sq='"' dq="'"
>
Decode and return a dictionary of attributes.
{
'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
'empty': '', 'noval': None, 'entity': '&',
'sq': '"', 'dq': '\''
}.
NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
"""
parser = HTMLAttributeParser()
try:
parser.feed(html_element)
parser.close()
# Older Python may throw HTMLParseError in case of malformed HTML
except compat_HTMLParseError:
pass
return parser.attrs
def parse_list(webpage):
"""Given a string for an series of HTML <li> elements,
return a dictionary of their attributes"""
parser = HTMLListAttrsParser()
parser.feed(webpage)
parser.close()
return parser.items
def clean_html(html):
"""Clean an HTML snippet into a readable string"""
if html is None: # Convenience for sanitizing descriptions etc.
return html
# Newline vs <br />
html = html.replace('\n', ' ')
html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html)
html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
# Strip html tags
html = re.sub('<.*?>', '', html)
# Replace html entities
html = unescapeHTML(html)
return html.strip()
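# Small illustration (hypothetical snippet): clean_html('<p>Hello<br/>world</p>')
# returns 'Hello\nworld', since <br/> is turned into a newline and the remaining
# tags are stripped.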
def sanitize_open(filename, open_mode):
"""Try to open the given filename, and slightly tweak it if this fails.
Attempts to open the given filename. If this fails, it tries to change
the filename slightly, step by step, until it's either able to open it
or it fails and raises a final exception, like the standard open()
function.
It returns the tuple (stream, definitive_file_name).
"""
try:
if filename == '-':
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
stream = open(encodeFilename(filename), open_mode)
return (stream, filename)
except (IOError, OSError) as err:
if err.errno in (errno.EACCES,):
raise
# In case of error, try to remove win32 forbidden chars
alt_filename = sanitize_path(filename)
if alt_filename == filename:
raise
else:
# An exception here should be caught in the caller
stream = open(encodeFilename(alt_filename), open_mode)
return (stream, alt_filename)
def timeconvert(timestr):
"""Convert RFC 2822 defined time string into system timestamp"""
timestamp = None
timetuple = email.utils.parsedate_tz(timestr)
if timetuple is not None:
timestamp = email.utils.mktime_tz(timetuple)
return timestamp
def sanitize_filename(s, restricted=False, is_id=False):
"""Sanitizes a string so it could be used as part of a filename.
If restricted is set, use a stricter subset of allowed characters.
Set is_id if this is not an arbitrary string, but an ID that should be kept
if possible.
"""
def replace_insane(char):
if restricted and char in ACCENT_CHARS:
return ACCENT_CHARS[char]
elif not restricted and char == '\n':
return ' '
elif char == '?' or ord(char) < 32 or ord(char) == 127:
return ''
elif char == '"':
return '' if restricted else '\''
elif char == ':':
return '_-' if restricted else ' -'
elif char in '\\/|*<>':
return '_'
if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
return '_'
if restricted and ord(char) > 127:
return '_'
return char
if s == '':
return ''
# Handle timestamps
s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
result = ''.join(map(replace_insane, s))
if not is_id:
while '__' in result:
result = result.replace('__', '_')
result = result.strip('_')
# Common case of "Foreign band name - English song title"
if restricted and result.startswith('-_'):
result = result[2:]
if result.startswith('-'):
result = '_' + result[len('-'):]
result = result.lstrip('.')
if not result:
result = '_'
return result
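# Example behaviour (illustrative input): timestamps keep their digits but lose
# the colons, so sanitize_filename('New World record at 0:12:34') returns
# 'New World record at 0_12_34'; with restricted=True the spaces would also be
# turned into underscores.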
def sanitize_path(s, force=False):
"""Sanitizes and normalizes path on Windows"""
if sys.platform == 'win32':
force = False
drive_or_unc, _ = os.path.splitdrive(s)
if sys.version_info < (2, 7) and not drive_or_unc:
drive_or_unc, _ = os.path.splitunc(s)
elif force:
drive_or_unc = ''
else:
return s
norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
if drive_or_unc:
norm_path.pop(0)
sanitized_path = [
path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
for path_part in norm_path]
if drive_or_unc:
sanitized_path.insert(0, drive_or_unc + os.path.sep)
elif force and s[0] == os.path.sep:
sanitized_path.insert(0, os.path.sep)
return os.path.join(*sanitized_path)
def sanitize_url(url):
# Prepend protocol-less URLs with `http:` scheme in order to mitigate
# the number of unwanted failures due to missing protocol
if url.startswith('//'):
return 'http:%s' % url
# Fix some common typos seen so far
COMMON_TYPOS = (
# https://github.com/ytdl-org/youtube-dl/issues/15649
(r'^httpss://', r'https://'),
# https://bx1.be/lives/direct-tv/
(r'^rmtp([es]?)://', r'rtmp\1://'),
)
for mistake, fixup in COMMON_TYPOS:
if re.match(mistake, url):
return re.sub(mistake, fixup, url)
return url
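# For instance (illustrative URLs): sanitize_url('//example.com/playlist')
# returns 'http://example.com/playlist', and sanitize_url('httpss://example.com/')
# is rewritten to 'https://example.com/'.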
def extract_basic_auth(url):
parts = compat_urlparse.urlsplit(url)
if parts.username is None:
return url, None
url = compat_urlparse.urlunsplit(parts._replace(netloc=(
parts.hostname if parts.port is None
else '%s:%d' % (parts.hostname, parts.port))))
auth_payload = base64.b64encode(
('%s:%s' % (parts.username, parts.password or '')).encode('utf-8'))
return url, 'Basic ' + auth_payload.decode('utf-8')
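# Illustrative example (hypothetical credentials):
# extract_basic_auth('http://user:pass@example.com/path') returns
# ('http://example.com/path', 'Basic dXNlcjpwYXNz'), i.e. the userinfo is moved
# out of the URL and into an Authorization header value.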
def sanitized_Request(url, *args, **kwargs):
url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
if auth_header is not None:
headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
headers['Authorization'] = auth_header
return compat_urllib_request.Request(url, *args, **kwargs)
def expand_path(s):
"""Expand shell variables and ~"""
return os.path.expandvars(compat_expanduser(s))
def orderedSet(iterable):
""" Remove all duplicates from the input iterable """
res = []
for el in iterable:
if el not in res:
res.append(el)
return res
def _htmlentity_transform(entity_with_semicolon):
"""Transforms an HTML entity to a character."""
entity = entity_with_semicolon[:-1]
# Known non-numeric HTML entity
if entity in compat_html_entities.name2codepoint:
return compat_chr(compat_html_entities.name2codepoint[entity])
# TODO: HTML5 allows entities without a semicolon. For example,
# '&Eacuteric' should be decoded as 'Éric'.
if entity_with_semicolon in compat_html_entities_html5:
return compat_html_entities_html5[entity_with_semicolon]
mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
if mobj is not None:
numstr = mobj.group(1)
if numstr.startswith('x'):
base = 16
numstr = '0%s' % numstr
else:
base = 10
# See https://github.com/ytdl-org/youtube-dl/issues/7518
try:
return compat_chr(int(numstr, base))
except ValueError:
pass
# Unknown entity in name, return its literal representation
return '&%s;' % entity
def unescapeHTML(s):
if s is None:
return None
assert type(s) == compat_str
return re.sub(
r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
def escapeHTML(text):
return (
text
.replace('&', '&')
.replace('<', '<')
.replace('>', '>')
.replace('"', '"')
.replace("'", ''')
)
def process_communicate_or_kill(p, *args, **kwargs):
try:
return p.communicate(*args, **kwargs)
except BaseException: # Including KeyboardInterrupt
p.kill()
p.wait()
raise
class Popen(subprocess.Popen):
if sys.platform == 'win32':
_startupinfo = subprocess.STARTUPINFO()
_startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
_startupinfo = None
def __init__(self, *args, **kwargs):
super(Popen, self).__init__(*args, **kwargs, startupinfo=self._startupinfo)
def communicate_or_kill(self, *args, **kwargs):
return process_communicate_or_kill(self, *args, **kwargs)
def get_subprocess_encoding():
if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
# For subprocess calls, encode with locale encoding
# Refer to http://stackoverflow.com/a/9951851/35070
encoding = preferredencoding()
else:
encoding = sys.getfilesystemencoding()
if encoding is None:
encoding = 'utf-8'
return encoding
def encodeFilename(s, for_subprocess=False):
"""
@param s The name of the file
"""
assert type(s) == compat_str
# Python 3 has a Unicode API
if sys.version_info >= (3, 0):
return s
# Pass '' directly to use Unicode APIs on Windows 2000 and up
# (Detecting Windows NT 4 is tricky because 'major >= 4' would
# match Windows 9x series as well. Besides, NT 4 is obsolete.)
if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
return s
# Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
if sys.platform.startswith('java'):
return s
return s.encode(get_subprocess_encoding(), 'ignore')
def decodeFilename(b, for_subprocess=False):
if sys.version_info >= (3, 0):
return b
if not isinstance(b, bytes):
return b
return b.decode(get_subprocess_encoding(), 'ignore')
def encodeArgument(s):
if not isinstance(s, compat_str):
# Legacy code that uses byte strings
# Uncomment the following line after fixing all post processors
# assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
s = s.decode('ascii')
return encodeFilename(s, True)
def decodeArgument(b):
return decodeFilename(b, True)
def decodeOption(optval):
if optval is None:
return optval
if isinstance(optval, bytes):
optval = optval.decode(preferredencoding())
assert isinstance(optval, compat_str)
return optval
_timetuple = collections.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))
def timetuple_from_msec(msec):
secs, msec = divmod(msec, 1000)
mins, secs = divmod(secs, 60)
hrs, mins = divmod(mins, 60)
return _timetuple(hrs, mins, secs, msec)
def formatSeconds(secs, delim=':', msec=False):
time = timetuple_from_msec(secs * 1000)
if time.hours:
ret = '%d%s%02d%s%02d' % (time.hours, delim, time.minutes, delim, time.seconds)
elif time.minutes:
ret = '%d%s%02d' % (time.minutes, delim, time.seconds)
else:
ret = '%d' % time.seconds
return '%s.%03d' % (ret, time.milliseconds) if msec else ret
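# Illustrative usage (editor's sketch, not part of the upstream source):
#   >>> timetuple_from_msec(90061001)
#   Time(hours=25, minutes=1, seconds=1, milliseconds=1)
#   >>> formatSeconds(3661)
#   '1:01:01'
#   >>> formatSeconds(75, msec=True)
#   '1:15.000'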
def _ssl_load_windows_store_certs(ssl_context, storename):
# Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
try:
certs = [cert for cert, encoding, trust in ssl.enum_certificates(storename)
if encoding == 'x509_asn' and (
trust is True or ssl.Purpose.SERVER_AUTH.oid in trust)]
except PermissionError:
return
for cert in certs:
try:
ssl_context.load_verify_locations(cadata=cert)
except ssl.SSLError:
pass
def make_HTTPS_handler(params, **kwargs):
opts_check_certificate = not params.get('nocheckcertificate')
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = opts_check_certificate
context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
if opts_check_certificate:
try:
context.load_default_certs()
# Work around the issue in load_default_certs when there are bad certificates. See:
# https://github.com/yt-dlp/yt-dlp/issues/1060,
# https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
except ssl.SSLError:
# enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
# Create a new context to discard any certificates that were already loaded
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname, context.verify_mode = True, ssl.CERT_REQUIRED
for storename in ('CA', 'ROOT'):
_ssl_load_windows_store_certs(context, storename)
context.set_default_verify_paths()
return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
def bug_reports_message(before=';'):
if ytdl_is_updateable():
update_cmd = 'type yt-dlp -U to update'
else:
update_cmd = 'see https://github.com/yt-dlp/yt-dlp on how to update'
msg = 'please report this issue on https://github.com/yt-dlp/yt-dlp .'
msg += ' Make sure you are using the latest version; %s.' % update_cmd
msg += ' Be sure to call yt-dlp with the --verbose flag and include its complete output.'
before = before.rstrip()
if not before or before.endswith(('.', '!', '?')):
msg = msg[0].title() + msg[1:]
return (before + ' ' if before else '') + msg
class YoutubeDLError(Exception):
"""Base exception for YoutubeDL errors."""
msg = None
def __init__(self, msg=None):
if msg is not None:
self.msg = msg
elif self.msg is None:
self.msg = type(self).__name__
super().__init__(self.msg)
network_exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
network_exceptions.append(ssl.CertificateError)
network_exceptions = tuple(network_exceptions)
class ExtractorError(YoutubeDLError):
"""Error during info extraction."""
def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=None):
""" tb, if given, is the original traceback (so that it can be printed out).
If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
"""
if sys.exc_info()[0] in network_exceptions:
expected = True
self.msg = str(msg)
self.traceback = tb
self.expected = expected
self.cause = cause
self.video_id = video_id
self.ie = ie
self.exc_info = sys.exc_info() # preserve original exception
super(ExtractorError, self).__init__(''.join((
format_field(ie, template='[%s] '),
format_field(video_id, template='%s: '),
self.msg,
format_field(cause, template=' (caused by %r)'),
'' if expected else bug_reports_message())))
def format_traceback(self):
if self.traceback is None:
return None
return ''.join(traceback.format_tb(self.traceback))
class UnsupportedError(ExtractorError):
def __init__(self, url):
super(UnsupportedError, self).__init__(
'Unsupported URL: %s' % url, expected=True)
self.url = url
class RegexNotFoundError(ExtractorError):
"""Error when a regex didn't match"""
pass
class GeoRestrictedError(ExtractorError):
"""Geographic restriction Error exception.
This exception may be thrown when a video is not available from your
geographic location due to geographic restrictions imposed by a website.
"""
def __init__(self, msg, countries=None, **kwargs):
kwargs['expected'] = True
super(GeoRestrictedError, self).__init__(msg, **kwargs)
self.countries = countries
class DownloadError(YoutubeDLError):
"""Download Error exception.
This exception may be thrown by FileDownloader objects if they are not
configured to continue on errors. They will contain the appropriate
error message.
"""
def __init__(self, msg, exc_info=None):
""" exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
super(DownloadError, self).__init__(msg)
self.exc_info = exc_info
class EntryNotInPlaylist(YoutubeDLError):
"""Entry not in playlist exception.
This exception will be thrown by YoutubeDL when a requested entry
is not found in the playlist info_dict
"""
msg = 'Entry not found in info'
class SameFileError(YoutubeDLError):
"""Same File exception.
This exception will be thrown by FileDownloader objects if they detect
multiple files would have to be downloaded to the same file on disk.
"""
msg = 'Fixed output name but more than one file to download'
def __init__(self, filename=None):
if filename is not None:
self.msg += f': {filename}'
super().__init__(self.msg)
class PostProcessingError(YoutubeDLError):
"""Post Processing exception.
This exception may be raised by PostProcessor's .run() method to
indicate an error in the postprocessing task.
"""
class DownloadCancelled(YoutubeDLError):
""" Exception raised when the download queue should be interrupted """
msg = 'The download was cancelled'
class ExistingVideoReached(DownloadCancelled):
""" --break-on-existing triggered """
msg = 'Encountered a video that is already in the archive, stopping due to --break-on-existing'
class RejectedVideoReached(DownloadCancelled):
""" --break-on-reject triggered """
msg = 'Encountered a video that did not match filter, stopping due to --break-on-reject'
class MaxDownloadsReached(DownloadCancelled):
""" --max-downloads limit has been reached. """
msg = 'Maximum number of downloads reached, stopping due to --max-downloads'
class ReExtractInfo(YoutubeDLError):
""" Video info needs to be re-extracted. """
def __init__(self, msg, expected=False):
super().__init__(msg)
self.expected = expected
class ThrottledDownload(ReExtractInfo):
""" Download speed below --throttled-rate. """
msg = 'The download speed is below throttle limit'
def __init__(self, msg):
super().__init__(msg, expected=False)
class UnavailableVideoError(YoutubeDLError):
"""Unavailable Format exception.
This exception will be thrown when a video is requested
in a format that is not available for that video.
"""
msg = 'Unable to download video'
def __init__(self, err=None):
if err is not None:
self.msg += f': {err}'
super().__init__(self.msg)
class ContentTooShortError(YoutubeDLError):
"""Content Too Short exception.
This exception may be raised by FileDownloader objects when a file they
download is too small for what the server announced first, indicating
the connection was probably interrupted.
"""
def __init__(self, downloaded, expected):
super(ContentTooShortError, self).__init__(
'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
)
# Both in bytes
self.downloaded = downloaded
self.expected = expected
class XAttrMetadataError(YoutubeDLError):
def __init__(self, code=None, msg='Unknown error'):
super(XAttrMetadataError, self).__init__(msg)
self.code = code
self.msg = msg
# Parsing code and msg
if (self.code in (errno.ENOSPC, errno.EDQUOT)
or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
self.reason = 'NO_SPACE'
elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
self.reason = 'VALUE_TOO_LONG'
else:
self.reason = 'NOT_SUPPORTED'
class XAttrUnavailableError(YoutubeDLError):
pass
def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
# Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
# expected HTTP responses to meet HTTP/1.0 or later (see also
# https://github.com/ytdl-org/youtube-dl/issues/6727)
if sys.version_info < (3, 0):
kwargs['strict'] = True
hc = http_class(*args, **compat_kwargs(kwargs))
source_address = ydl_handler._params.get('source_address')
if source_address is not None:
# This is to workaround _create_connection() from socket where it will try all
# address data from getaddrinfo() including IPv6. This filters the result from
# getaddrinfo() based on the source_address value.
# This is based on the cpython socket.create_connection() function.
# https://github.com/python/cpython/blob/master/Lib/socket.py#L691
def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
host, port = address
err = None
addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
ip_addrs = [addr for addr in addrs if addr[0] == af]
if addrs and not ip_addrs:
ip_version = 'v4' if af == socket.AF_INET else 'v6'
raise socket.error(
"No remote IP%s addresses available for connect, can't use '%s' as source address"
% (ip_version, source_address[0]))
for res in ip_addrs:
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
sock.bind(source_address)
sock.connect(sa)
err = None # Explicitly break reference cycle
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error('getaddrinfo returns an empty list')
if hasattr(hc, '_create_connection'):
hc._create_connection = _create_connection
sa = (source_address, 0)
if hasattr(hc, 'source_address'): # Python 2.7+
hc.source_address = sa
else: # Python 2.6
def _hc_connect(self, *args, **kwargs):
sock = _create_connection(
(self.host, self.port), self.timeout, sa)
if is_https:
self.sock = ssl.wrap_socket(
sock, self.key_file, self.cert_file,
ssl_version=ssl.PROTOCOL_TLSv1)
else:
self.sock = sock
hc.connect = functools.partial(_hc_connect, hc)
return hc
def handle_youtubedl_headers(headers):
filtered_headers = headers
if 'Youtubedl-no-compression' in filtered_headers:
filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
del filtered_headers['Youtubedl-no-compression']
return filtered_headers
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
"""Handler for HTTP requests and responses.
This class, when installed with an OpenerDirector, automatically adds
the standard headers to every HTTP request and handles gzipped and
deflated responses from web servers. If compression is to be avoided in
a particular request, the original request in the program code only has
to include the HTTP header "Youtubedl-no-compression", which will be
removed before making the real request.
Part of this code was copied from:
http://techknack.net/python-urllib2-handlers/
Andrew Rowls, the author of that code, agreed to release it to the
public domain.
"""
def __init__(self, params, *args, **kwargs):
compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
self._params = params
def http_open(self, req):
conn_class = compat_http_client.HTTPConnection
socks_proxy = req.headers.get('Ytdl-socks-proxy')
if socks_proxy:
conn_class = make_socks_conn_class(conn_class, socks_proxy)
del req.headers['Ytdl-socks-proxy']
return self.do_open(functools.partial(
_create_http_connection, self, conn_class, False),
req)
@staticmethod
def deflate(data):
if not data:
return data
try:
return zlib.decompress(data, -zlib.MAX_WBITS)
except zlib.error:
return zlib.decompress(data)
def http_request(self, req):
# According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
# always respected by websites, some tend to give out URLs with non percent-encoded
# non-ASCII characters (see telemb.py, ard.py [#3412])
# urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
# To work around aforementioned issue we will replace request's original URL with
# percent-encoded one
# Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
# the code of this workaround has been moved here from YoutubeDL.urlopen()
url = req.get_full_url()
url_escaped = escape_url(url)
# Substitute URL if any change after escaping
if url != url_escaped:
req = update_Request(req, url=url_escaped)
for h, v in std_headers.items():
# Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
# The dict keys are capitalized because of this bug by urllib
if h.capitalize() not in req.headers:
req.add_header(h, v)
req.headers = handle_youtubedl_headers(req.headers)
if sys.version_info < (2, 7) and '#' in req.get_full_url():
# Python 2.6 is brain-dead when it comes to fragments
req._Request__original = req._Request__original.partition('#')[0]
req._Request__r_type = req._Request__r_type.partition('#')[0]
return req
def http_response(self, req, resp):
old_resp = resp
# gzip
if resp.headers.get('Content-encoding', '') == 'gzip':
content = resp.read()
gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
try:
uncompressed = io.BytesIO(gz.read())
except IOError as original_ioerror:
                # There may be junk at the end of the file
# See http://stackoverflow.com/q/4928560/35070 for details
for i in range(1, 1024):
try:
gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
uncompressed = io.BytesIO(gz.read())
except IOError:
continue
break
else:
raise original_ioerror
resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
del resp.headers['Content-encoding']
# deflate
if resp.headers.get('Content-encoding', '') == 'deflate':
gz = io.BytesIO(self.deflate(resp.read()))
resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
del resp.headers['Content-encoding']
# Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
# https://github.com/ytdl-org/youtube-dl/issues/6457).
if 300 <= resp.code < 400:
location = resp.headers.get('Location')
if location:
                # According to RFC 2616 the default charset is iso-8859-1, which Python 3 respects
if sys.version_info >= (3, 0):
location = location.encode('iso-8859-1').decode('utf-8')
else:
location = location.decode('utf-8')
location_escaped = escape_url(location)
if location != location_escaped:
del resp.headers['Location']
if sys.version_info < (3, 0):
location_escaped = location_escaped.encode('utf-8')
resp.headers['Location'] = location_escaped
return resp
https_request = http_request
https_response = http_response
def make_socks_conn_class(base_class, socks_proxy):
assert issubclass(base_class, (
compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))
url_components = compat_urlparse.urlparse(socks_proxy)
if url_components.scheme.lower() == 'socks5':
socks_type = ProxyType.SOCKS5
elif url_components.scheme.lower() in ('socks', 'socks4'):
socks_type = ProxyType.SOCKS4
elif url_components.scheme.lower() == 'socks4a':
socks_type = ProxyType.SOCKS4A
def unquote_if_non_empty(s):
if not s:
return s
return compat_urllib_parse_unquote_plus(s)
proxy_args = (
socks_type,
url_components.hostname, url_components.port or 1080,
True, # Remote DNS
unquote_if_non_empty(url_components.username),
unquote_if_non_empty(url_components.password),
)
class SocksConnection(base_class):
def connect(self):
self.sock = sockssocket()
self.sock.setproxy(*proxy_args)
if type(self.timeout) in (int, float):
self.sock.settimeout(self.timeout)
self.sock.connect((self.host, self.port))
if isinstance(self, compat_http_client.HTTPSConnection):
if hasattr(self, '_context'): # Python > 2.6
self.sock = self._context.wrap_socket(
self.sock, server_hostname=self.host)
else:
self.sock = ssl.wrap_socket(self.sock)
return SocksConnection
class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
def __init__(self, params, https_conn_class=None, *args, **kwargs):
compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
self._params = params
def https_open(self, req):
kwargs = {}
conn_class = self._https_conn_class
if hasattr(self, '_context'): # python > 2.6
kwargs['context'] = self._context
if hasattr(self, '_check_hostname'): # python 3.x
kwargs['check_hostname'] = self._check_hostname
socks_proxy = req.headers.get('Ytdl-socks-proxy')
if socks_proxy:
conn_class = make_socks_conn_class(conn_class, socks_proxy)
del req.headers['Ytdl-socks-proxy']
return self.do_open(functools.partial(
_create_http_connection, self, conn_class, True),
req, **kwargs)
class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
"""
See [1] for cookie file format.
1. https://curl.haxx.se/docs/http-cookies.html
"""
_HTTPONLY_PREFIX = '#HttpOnly_'
_ENTRY_LEN = 7
_HEADER = '''# Netscape HTTP Cookie File
# This file is generated by yt-dlp. Do not edit.
'''
_CookieFileEntry = collections.namedtuple(
'CookieFileEntry',
('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
"""
Save cookies to a file.
Most of the code is taken from CPython 3.8 and slightly adapted
to support cookie files with UTF-8 in both python 2 and 3.
"""
if filename is None:
if self.filename is not None:
filename = self.filename
else:
raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
# Store session cookies with `expires` set to 0 instead of an empty
# string
for cookie in self:
if cookie.expires is None:
cookie.expires = 0
with io.open(filename, 'w', encoding='utf-8') as f:
f.write(self._HEADER)
now = time.time()
for cookie in self:
if not ignore_discard and cookie.discard:
continue
if not ignore_expires and cookie.is_expired(now):
continue
if cookie.secure:
secure = 'TRUE'
else:
secure = 'FALSE'
if cookie.domain.startswith('.'):
initial_dot = 'TRUE'
else:
initial_dot = 'FALSE'
if cookie.expires is not None:
expires = compat_str(cookie.expires)
else:
expires = ''
if cookie.value is None:
# cookies.txt regards 'Set-Cookie: foo' as a cookie
# with no name, whereas http.cookiejar regards it as a
# cookie with no value.
name = ''
value = cookie.name
else:
name = cookie.name
value = cookie.value
f.write(
'\t'.join([cookie.domain, initial_dot, cookie.path,
secure, expires, name, value]) + '\n')
def load(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Load cookies from a file."""
if filename is None:
if self.filename is not None:
filename = self.filename
else:
raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
def prepare_line(line):
if line.startswith(self._HTTPONLY_PREFIX):
line = line[len(self._HTTPONLY_PREFIX):]
# comments and empty lines are fine
if line.startswith('#') or not line.strip():
return line
cookie_list = line.split('\t')
if len(cookie_list) != self._ENTRY_LEN:
raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list))
cookie = self._CookieFileEntry(*cookie_list)
if cookie.expires_at and not cookie.expires_at.isdigit():
raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
return line
cf = io.StringIO()
with io.open(filename, encoding='utf-8') as f:
for line in f:
try:
cf.write(prepare_line(line))
except compat_cookiejar.LoadError as e:
write_string(
'WARNING: skipping cookie file entry due to %s: %r\n'
% (e, line), sys.stderr)
continue
cf.seek(0)
self._really_load(cf, filename, ignore_discard, ignore_expires)
# Session cookies are denoted by either `expires` field set to
# an empty string or 0. MozillaCookieJar only recognizes the former
        # (see [1]). So we need to force the latter to be recognized as session
# cookies on our own.
# Session cookies may be important for cookies-based authentication,
# e.g. usually, when user does not check 'Remember me' check box while
# logging in on a site, some important cookies are stored as session
# cookies so that not recognizing them will result in failed login.
# 1. https://bugs.python.org/issue17164
for cookie in self:
# Treat `expires=0` cookies as session cookies
if cookie.expires == 0:
cookie.expires = None
cookie.discard = True
class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
def __init__(self, cookiejar=None):
compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)
def http_response(self, request, response):
# Python 2 will choke on next HTTP request in row if there are non-ASCII
# characters in Set-Cookie HTTP header of last response (see
# https://github.com/ytdl-org/youtube-dl/issues/6769).
# In order to at least prevent crashing we will percent encode Set-Cookie
# header before HTTPCookieProcessor starts processing it.
# if sys.version_info < (3, 0) and response.headers:
# for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
# set_cookie = response.headers.get(set_cookie_header)
# if set_cookie:
# set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
# if set_cookie != set_cookie_escaped:
# del response.headers[set_cookie_header]
# response.headers[set_cookie_header] = set_cookie_escaped
return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)
https_request = compat_urllib_request.HTTPCookieProcessor.http_request
https_response = http_response
class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
"""YoutubeDL redirect handler
The code is based on HTTPRedirectHandler implementation from CPython [1].
This redirect handler solves two issues:
- ensures redirect URL is always unicode under python 2
- introduces support for experimental HTTP response status code
308 Permanent Redirect [2] used by some sites [3]
1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
2. https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308
3. https://github.com/ytdl-org/youtube-dl/issues/28768
"""
http_error_301 = http_error_303 = http_error_307 = http_error_308 = compat_urllib_request.HTTPRedirectHandler.http_error_302
def redirect_request(self, req, fp, code, msg, headers, newurl):
"""Return a Request or None in response to a redirect.
This is called by the http_error_30x methods when a
redirection response is received. If a redirection should
take place, return a new Request to allow http_error_30x to
perform the redirect. Otherwise, raise HTTPError if no-one
else should try to handle this url. Return None if you can't
but another Handler might.
"""
m = req.get_method()
if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
or code in (301, 302, 303) and m == "POST")):
raise compat_HTTPError(req.full_url, code, msg, headers, fp)
# Strictly (according to RFC 2616), 301 or 302 in response to
# a POST MUST NOT cause a redirection without confirmation
# from the user (of urllib.request, in this case). In practice,
# essentially all clients do redirect in this case, so we do
# the same.
# On python 2 urlh.geturl() may sometimes return redirect URL
        # as a byte string instead of unicode. This workaround forces it
        # to always return unicode.
if sys.version_info[0] < 3:
newurl = compat_str(newurl)
# Be conciliant with URIs containing a space. This is mainly
# redundant with the more complete encoding done in http_error_302(),
# but it is kept for compatibility with other callers.
newurl = newurl.replace(' ', '%20')
CONTENT_HEADERS = ("content-length", "content-type")
# NB: don't use dict comprehension for python 2.6 compatibility
newheaders = dict((k, v) for k, v in req.headers.items()
if k.lower() not in CONTENT_HEADERS)
return compat_urllib_request.Request(
newurl, headers=newheaders, origin_req_host=req.origin_req_host,
unverifiable=True)
def extract_timezone(date_str):
m = re.search(
r'''(?x)
^.{8,}? # >=8 char non-TZ prefix, if present
(?P<tz>Z| # just the UTC Z, or
(?:(?<=.\b\d{4}|\b\d{2}:\d\d)| # preceded by 4 digits or hh:mm or
(?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d)) # not preceded by 3 alpha word or >= 4 alpha or 2 digits
[ ]? # optional space
(?P<sign>\+|-) # +/-
(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2}) # hh[:]mm
$)
''', date_str)
if not m:
timezone = datetime.timedelta()
else:
date_str = date_str[:-len(m.group('tz'))]
if not m.group('sign'):
timezone = datetime.timedelta()
else:
sign = 1 if m.group('sign') == '+' else -1
timezone = datetime.timedelta(
hours=sign * int(m.group('hours')),
minutes=sign * int(m.group('minutes')))
return timezone, date_str
def parse_iso8601(date_str, delimiter='T', timezone=None):
""" Return a UNIX timestamp from the given date """
if date_str is None:
return None
date_str = re.sub(r'\.[0-9]+', '', date_str)
if timezone is None:
timezone, date_str = extract_timezone(date_str)
try:
date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
dt = datetime.datetime.strptime(date_str, date_format) - timezone
return calendar.timegm(dt.timetuple())
except ValueError:
pass
def date_formats(day_first=True):
return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST
def unified_strdate(date_str, day_first=True):
"""Return a string with the date in the format YYYYMMDD"""
if date_str is None:
return None
upload_date = None
# Replace commas
date_str = date_str.replace(',', ' ')
# Remove AM/PM + timezone
date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
_, date_str = extract_timezone(date_str)
for expression in date_formats(day_first):
try:
upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
except ValueError:
pass
if upload_date is None:
timetuple = email.utils.parsedate_tz(date_str)
if timetuple:
try:
upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
except ValueError:
pass
if upload_date is not None:
return compat_str(upload_date)
def unified_timestamp(date_str, day_first=True):
if date_str is None:
return None
date_str = re.sub(r'[,|]', '', date_str)
pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
timezone, date_str = extract_timezone(date_str)
# Remove AM/PM + timezone
date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
# Remove unrecognized timezones from ISO 8601 alike timestamps
m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
if m:
date_str = date_str[:-len(m.group('tz'))]
# Python only supports microseconds, so remove nanoseconds
m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
if m:
date_str = m.group(1)
for expression in date_formats(day_first):
try:
dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
return calendar.timegm(dt.timetuple())
except ValueError:
pass
timetuple = email.utils.parsedate_tz(date_str)
if timetuple:
return calendar.timegm(timetuple) + pm_delta * 3600
def determine_ext(url, default_ext='unknown_video'):
if url is None or '.' not in url:
return default_ext
guess = url.partition('?')[0].rpartition('.')[2]
if re.match(r'^[A-Za-z0-9]+$', guess):
return guess
# Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
elif guess.rstrip('/') in KNOWN_EXTENSIONS:
return guess.rstrip('/')
else:
return default_ext
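# Illustrative usage (editor's sketch, not part of the upstream source):
#   >>> determine_ext('http://example.com/video.mp4?download=1')
#   'mp4'
#   >>> determine_ext('http://example.com/feed')
#   'unknown_video'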
def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
return replace_extension(filename, sub_lang + '.' + sub_format, expected_real_ext)
def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
"""
Return a datetime object from a string in the format YYYYMMDD or
(now|today|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?
format: string date format used to return datetime object from
precision: round the time portion of a datetime object.
auto|microsecond|second|minute|hour|day.
auto: round to the unit provided in date_str (if applicable).
"""
auto_precision = False
if precision == 'auto':
auto_precision = True
precision = 'microsecond'
today = datetime_round(datetime.datetime.now(), precision)
if date_str in ('now', 'today'):
return today
if date_str == 'yesterday':
return today - datetime.timedelta(days=1)
match = re.match(
r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)(s)?',
date_str)
if match is not None:
start_time = datetime_from_str(match.group('start'), precision, format)
time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1)
unit = match.group('unit')
if unit == 'month' or unit == 'year':
new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time)
unit = 'day'
else:
if unit == 'week':
unit = 'day'
time *= 7
delta = datetime.timedelta(**{unit + 's': time})
new_date = start_time + delta
if auto_precision:
return datetime_round(new_date, unit)
return new_date
return datetime_round(datetime.datetime.strptime(date_str, format), precision)
def date_from_str(date_str, format='%Y%m%d'):
"""
Return a datetime object from a string in the format YYYYMMDD or
(now|today|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?
format: string date format used to return datetime object from
"""
return datetime_from_str(date_str, precision='microsecond', format=format).date()
def datetime_add_months(dt, months):
"""Increment/Decrement a datetime object by months."""
month = dt.month + months - 1
year = dt.year + month // 12
month = month % 12 + 1
day = min(dt.day, calendar.monthrange(year, month)[1])
return dt.replace(year, month, day)
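# Illustrative usage (editor's sketch, not part of the upstream source; note
# how the day is clamped to the length of the target month):
#   >>> datetime_add_months(datetime.date(2021, 1, 31), 1)
#   datetime.date(2021, 2, 28)
#   >>> datetime_add_months(datetime.date(2021, 12, 15), 2)
#   datetime.date(2022, 2, 15)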
def datetime_round(dt, precision='day'):
"""
Round a datetime object's time to a specific precision
"""
if precision == 'microsecond':
return dt
unit_seconds = {
'day': 86400,
'hour': 3600,
'minute': 60,
'second': 1,
}
roundto = lambda x, n: ((x + n / 2) // n) * n
timestamp = calendar.timegm(dt.timetuple())
return datetime.datetime.utcfromtimestamp(roundto(timestamp, unit_seconds[precision]))
def hyphenate_date(date_str):
"""
Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
if match is not None:
return '-'.join(match.groups())
else:
return date_str
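# Illustrative usage (editor's sketch, not part of the upstream source):
#   >>> hyphenate_date('20210102')
#   '2021-01-02'
#   >>> hyphenate_date('2021-01-02')  # not in YYYYMMDD form, returned unchanged
#   '2021-01-02'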
class DateRange(object):
"""Represents a time interval between two dates"""
def __init__(self, start=None, end=None):
"""start and end must be strings in the format accepted by date"""
if start is not None:
self.start = date_from_str(start)
else:
self.start = datetime.datetime.min.date()
if end is not None:
self.end = date_from_str(end)
else:
self.end = datetime.datetime.max.date()
if self.start > self.end:
raise ValueError('Date range: "%s" , the start date must be before the end date' % self)
@classmethod
def day(cls, day):
"""Returns a range that only contains the given day"""
return cls(day, day)
def __contains__(self, date):
"""Check if the date is in the range"""
if not isinstance(date, datetime.date):
date = date_from_str(date)
return self.start <= date <= self.end
def __str__(self):
return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
def platform_name():
""" Returns the platform name as a compat_str """
res = platform.platform()
if isinstance(res, bytes):
res = res.decode(preferredencoding())
assert isinstance(res, compat_str)
return res
def get_windows_version():
''' Get Windows version. None if it's not running on Windows '''
if compat_os_name == 'nt':
return version_tuple(platform.win32_ver()[1])
else:
return None
def _windows_write_string(s, out):
""" Returns True if the string was written using special methods,
False if it has yet to be written out."""
# Adapted from http://stackoverflow.com/a/3259271/35070
import ctypes
import ctypes.wintypes
WIN_OUTPUT_IDS = {
1: -11,
2: -12,
}
try:
fileno = out.fileno()
except AttributeError:
# If the output stream doesn't have a fileno, it's virtual
return False
except io.UnsupportedOperation:
# Some strange Windows pseudo files?
return False
if fileno not in WIN_OUTPUT_IDS:
return False
GetStdHandle = compat_ctypes_WINFUNCTYPE(
ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
('GetStdHandle', ctypes.windll.kernel32))
h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
WriteConsoleW = compat_ctypes_WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
written = ctypes.wintypes.DWORD(0)
GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32))
FILE_TYPE_CHAR = 0x0002
FILE_TYPE_REMOTE = 0x8000
GetConsoleMode = compat_ctypes_WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
ctypes.POINTER(ctypes.wintypes.DWORD))(
('GetConsoleMode', ctypes.windll.kernel32))
INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
def not_a_console(handle):
if handle == INVALID_HANDLE_VALUE or handle is None:
return True
return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
if not_a_console(h):
return False
def next_nonbmp_pos(s):
try:
return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
except StopIteration:
return len(s)
while s:
count = min(next_nonbmp_pos(s), 1024)
ret = WriteConsoleW(
h, s, count if count else 2, ctypes.byref(written), None)
if ret == 0:
raise OSError('Failed to write string')
if not count: # We just wrote a non-BMP character
assert written.value == 2
s = s[1:]
else:
assert written.value > 0
s = s[written.value:]
return True
def write_string(s, out=None, encoding=None):
if out is None:
out = sys.stderr
assert type(s) == compat_str
if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
if _windows_write_string(s, out):
return
if ('b' in getattr(out, 'mode', '')
or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr
byt = s.encode(encoding or preferredencoding(), 'ignore')
out.write(byt)
elif hasattr(out, 'buffer'):
enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
byt = s.encode(enc, 'ignore')
out.buffer.write(byt)
else:
out.write(s)
out.flush()
def bytes_to_intlist(bs):
if not bs:
return []
if isinstance(bs[0], int): # Python 3
return list(bs)
else:
return [ord(c) for c in bs]
def intlist_to_bytes(xs):
if not xs:
return b''
return compat_struct_pack('%dB' % len(xs), *xs)
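# Illustrative usage (editor's sketch, not part of the upstream source;
# the two helpers are inverses of each other):
#   >>> bytes_to_intlist(b'abc')
#   [97, 98, 99]
#   >>> intlist_to_bytes([97, 98, 99])
#   b'abc'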
# Cross-platform file locking
if sys.platform == 'win32':
import ctypes.wintypes
import msvcrt
class OVERLAPPED(ctypes.Structure):
_fields_ = [
('Internal', ctypes.wintypes.LPVOID),
('InternalHigh', ctypes.wintypes.LPVOID),
('Offset', ctypes.wintypes.DWORD),
('OffsetHigh', ctypes.wintypes.DWORD),
('hEvent', ctypes.wintypes.HANDLE),
]
kernel32 = ctypes.windll.kernel32
LockFileEx = kernel32.LockFileEx
LockFileEx.argtypes = [
ctypes.wintypes.HANDLE, # hFile
ctypes.wintypes.DWORD, # dwFlags
ctypes.wintypes.DWORD, # dwReserved
ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
ctypes.POINTER(OVERLAPPED) # Overlapped
]
LockFileEx.restype = ctypes.wintypes.BOOL
UnlockFileEx = kernel32.UnlockFileEx
UnlockFileEx.argtypes = [
ctypes.wintypes.HANDLE, # hFile
ctypes.wintypes.DWORD, # dwReserved
ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
ctypes.POINTER(OVERLAPPED) # Overlapped
]
UnlockFileEx.restype = ctypes.wintypes.BOOL
whole_low = 0xffffffff
whole_high = 0x7fffffff
def _lock_file(f, exclusive):
overlapped = OVERLAPPED()
overlapped.Offset = 0
overlapped.OffsetHigh = 0
overlapped.hEvent = 0
f._lock_file_overlapped_p = ctypes.pointer(overlapped)
handle = msvcrt.get_osfhandle(f.fileno())
if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
whole_low, whole_high, f._lock_file_overlapped_p):
raise OSError('Locking file failed: %r' % ctypes.FormatError())
def _unlock_file(f):
assert f._lock_file_overlapped_p
handle = msvcrt.get_osfhandle(f.fileno())
if not UnlockFileEx(handle, 0,
whole_low, whole_high, f._lock_file_overlapped_p):
raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
else:
    # Some platforms, such as Jython, are missing fcntl
try:
import fcntl
def _lock_file(f, exclusive):
fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
def _unlock_file(f):
fcntl.flock(f, fcntl.LOCK_UN)
except ImportError:
UNSUPPORTED_MSG = 'file locking is not supported on this platform'
def _lock_file(f, exclusive):
raise IOError(UNSUPPORTED_MSG)
def _unlock_file(f):
raise IOError(UNSUPPORTED_MSG)
class locked_file(object):
def __init__(self, filename, mode, encoding=None):
assert mode in ['r', 'a', 'w']
self.f = io.open(filename, mode, encoding=encoding)
self.mode = mode
def __enter__(self):
exclusive = self.mode != 'r'
try:
_lock_file(self.f, exclusive)
except IOError:
self.f.close()
raise
return self
def __exit__(self, etype, value, traceback):
try:
_unlock_file(self.f)
finally:
self.f.close()
def __iter__(self):
return iter(self.f)
def write(self, *args):
return self.f.write(*args)
def read(self, *args):
return self.f.read(*args)
def get_filesystem_encoding():
encoding = sys.getfilesystemencoding()
return encoding if encoding is not None else 'utf-8'
def shell_quote(args):
quoted_args = []
encoding = get_filesystem_encoding()
for a in args:
if isinstance(a, bytes):
# We may get a filename encoded with 'encodeFilename'
a = a.decode(encoding)
quoted_args.append(compat_shlex_quote(a))
return ' '.join(quoted_args)
def smuggle_url(url, data):
""" Pass additional data in a URL for internal use. """
url, idata = unsmuggle_url(url, {})
data.update(idata)
sdata = compat_urllib_parse_urlencode(
{'__youtubedl_smuggle': json.dumps(data)})
return url + '#' + sdata
def unsmuggle_url(smug_url, default=None):
if '#__youtubedl_smuggle' not in smug_url:
return smug_url, default
url, _, sdata = smug_url.rpartition('#')
jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
data = json.loads(jsond)
return url, data
def format_bytes(bytes):
if bytes is None:
return 'N/A'
if type(bytes) is str:
bytes = float(bytes)
if bytes == 0.0:
exponent = 0
else:
exponent = int(math.log(bytes, 1024.0))
suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
converted = float(bytes) / float(1024 ** exponent)
return '%.2f%s' % (converted, suffix)
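# Illustrative usage (editor's sketch, not part of the upstream source):
#   >>> format_bytes(1536)
#   '1.50KiB'
#   >>> format_bytes(None)
#   'N/A'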
def lookup_unit_table(unit_table, s):
units_re = '|'.join(re.escape(u) for u in unit_table)
m = re.match(
r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
if not m:
return None
num_str = m.group('num').replace(',', '.')
mult = unit_table[m.group('unit')]
return int(float(num_str) * mult)
def parse_filesize(s):
if s is None:
return None
# The lower-case forms are of course incorrect and unofficial,
# but we support those too
_UNIT_TABLE = {
'B': 1,
'b': 1,
'bytes': 1,
'KiB': 1024,
'KB': 1000,
'kB': 1024,
'Kb': 1000,
'kb': 1000,
'kilobytes': 1000,
'kibibytes': 1024,
'MiB': 1024 ** 2,
'MB': 1000 ** 2,
'mB': 1024 ** 2,
'Mb': 1000 ** 2,
'mb': 1000 ** 2,
'megabytes': 1000 ** 2,
'mebibytes': 1024 ** 2,
'GiB': 1024 ** 3,
'GB': 1000 ** 3,
'gB': 1024 ** 3,
'Gb': 1000 ** 3,
'gb': 1000 ** 3,
'gigabytes': 1000 ** 3,
'gibibytes': 1024 ** 3,
'TiB': 1024 ** 4,
'TB': 1000 ** 4,
'tB': 1024 ** 4,
'Tb': 1000 ** 4,
'tb': 1000 ** 4,
'terabytes': 1000 ** 4,
'tebibytes': 1024 ** 4,
'PiB': 1024 ** 5,
'PB': 1000 ** 5,
'pB': 1024 ** 5,
'Pb': 1000 ** 5,
'pb': 1000 ** 5,
'petabytes': 1000 ** 5,
'pebibytes': 1024 ** 5,
'EiB': 1024 ** 6,
'EB': 1000 ** 6,
'eB': 1024 ** 6,
'Eb': 1000 ** 6,
'eb': 1000 ** 6,
'exabytes': 1000 ** 6,
'exbibytes': 1024 ** 6,
'ZiB': 1024 ** 7,
'ZB': 1000 ** 7,
'zB': 1024 ** 7,
'Zb': 1000 ** 7,
'zb': 1000 ** 7,
'zettabytes': 1000 ** 7,
'zebibytes': 1024 ** 7,
'YiB': 1024 ** 8,
'YB': 1000 ** 8,
'yB': 1024 ** 8,
'Yb': 1000 ** 8,
'yb': 1000 ** 8,
'yottabytes': 1000 ** 8,
'yobibytes': 1024 ** 8,
}
return lookup_unit_table(_UNIT_TABLE, s)
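# Illustrative usage (editor's sketch, not part of the upstream source;
# binary and decimal prefixes use different multipliers):
#   >>> parse_filesize('1.5GiB')
#   1610612736
#   >>> parse_filesize('500 KB')
#   500000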
def parse_count(s):
if s is None:
return None
s = s.strip()
if re.match(r'^[\d,.]+$', s):
return str_to_int(s)
_UNIT_TABLE = {
'k': 1000,
'K': 1000,
'm': 1000 ** 2,
'M': 1000 ** 2,
'kk': 1000 ** 2,
'KK': 1000 ** 2,
}
return lookup_unit_table(_UNIT_TABLE, s)
def parse_resolution(s):
if s is None:
return {}
mobj = re.search(r'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s)
if mobj:
return {
'width': int(mobj.group('w')),
'height': int(mobj.group('h')),
}
mobj = re.search(r'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s)
if mobj:
return {'height': int(mobj.group(1))}
mobj = re.search(r'\b([48])[kK]\b', s)
if mobj:
return {'height': int(mobj.group(1)) * 540}
return {}
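# Illustrative usage (editor's sketch, not part of the upstream source):
#   >>> parse_resolution('1920x1080')
#   {'width': 1920, 'height': 1080}
#   >>> parse_resolution('720p')
#   {'height': 720}
#   >>> parse_resolution('4K')
#   {'height': 2160}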
def parse_bitrate(s):
if not isinstance(s, compat_str):
return
mobj = re.search(r'\b(\d+)\s*kbps', s)
if mobj:
return int(mobj.group(1))
def month_by_name(name, lang='en'):
""" Return the number of a month by (locale-independently) English name """
month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
try:
return month_names.index(name) + 1
except ValueError:
return None
def month_by_abbreviation(abbrev):
""" Return the number of a month by (locale-independently) English
abbreviations """
try:
return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
except ValueError:
return None
def fix_xml_ampersands(xml_str):
"""Replace all the '&' by '&' in XML"""
return re.sub(
r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;',
xml_str)
def setproctitle(title):
assert isinstance(title, compat_str)
# ctypes in Jython is not complete
# http://bugs.jython.org/issue2148
if sys.platform.startswith('java'):
return
try:
libc = ctypes.cdll.LoadLibrary('libc.so.6')
except OSError:
return
except TypeError:
# LoadLibrary in Windows Python 2.7.13 only expects
# a bytestring, but since unicode_literals turns
# every string into a unicode string, it fails.
return
title_bytes = title.encode('utf-8')
buf = ctypes.create_string_buffer(len(title_bytes))
buf.value = title_bytes
try:
libc.prctl(15, buf, 0, 0, 0)
except AttributeError:
return # Strange libc, just skip this
def remove_start(s, start):
return s[len(start):] if s is not None and s.startswith(start) else s
def remove_end(s, end):
return s[:-len(end)] if s is not None and s.endswith(end) else s
def remove_quotes(s):
if s is None or len(s) < 2:
return s
for quote in ('"', "'", ):
if s[0] == quote and s[-1] == quote:
return s[1:-1]
return s
def get_domain(url):
domain = re.match(r'(?:https?:\/\/)?(?:www\.)?(?P<domain>[^\n\/]+\.[^\n\/]+)(?:\/(.*))?', url)
return domain.group('domain') if domain else None
def url_basename(url):
path = compat_urlparse.urlparse(url).path
return path.strip('/').split('/')[-1]
def base_url(url):
return re.match(r'https?://[^?#&]+/', url).group()
def urljoin(base, path):
if isinstance(path, bytes):
path = path.decode('utf-8')
if not isinstance(path, compat_str) or not path:
return None
if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
return path
if isinstance(base, bytes):
base = base.decode('utf-8')
if not isinstance(base, compat_str) or not re.match(
r'^(?:https?:)?//', base):
return None
return compat_urlparse.urljoin(base, path)
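# Illustrative usage (editor's sketch, not part of the upstream source;
# already-absolute or protocol-relative paths are returned as-is):
#   >>> urljoin('http://example.com/a/', 'b/c.mp4')
#   'http://example.com/a/b/c.mp4'
#   >>> urljoin('http://example.com/a/', '//cdn.example.com/c.mp4')
#   '//cdn.example.com/c.mp4'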
class HEADRequest(compat_urllib_request.Request):
def get_method(self):
return 'HEAD'
class PUTRequest(compat_urllib_request.Request):
def get_method(self):
return 'PUT'
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
if get_attr:
if v is not None:
v = getattr(v, get_attr, None)
if v == '':
v = None
if v is None:
return default
try:
return int(v) * invscale // scale
except (ValueError, TypeError, OverflowError):
return default
def str_or_none(v, default=None):
return default if v is None else compat_str(v)
def str_to_int(int_str):
""" A more relaxed version of int_or_none """
if isinstance(int_str, compat_integer_types):
return int_str
elif isinstance(int_str, compat_str):
int_str = re.sub(r'[,\.\+]', '', int_str)
return int_or_none(int_str)
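# Illustrative usage (editor's sketch, not part of the upstream source):
#   >>> int_or_none('1024')
#   1024
#   >>> int_or_none('', default=0)
#   0
#   >>> str_to_int('1,000,000')
#   1000000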
def float_or_none(v, scale=1, invscale=1, default=None):
if v is None:
return default
try:
return float(v) * invscale / scale
except (ValueError, TypeError):
return default
def bool_or_none(v, default=None):
return v if isinstance(v, bool) else default
def strip_or_none(v, default=None):
return v.strip() if isinstance(v, compat_str) else default
def url_or_none(url):
if not url or not isinstance(url, compat_str):
return None
url = url.strip()
return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None
def strftime_or_none(timestamp, date_format, default=None):
datetime_object = None
try:
if isinstance(timestamp, compat_numeric_types): # unix timestamp
datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
elif isinstance(timestamp, compat_str): # assume YYYYMMDD
datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
return datetime_object.strftime(date_format)
except (ValueError, TypeError, AttributeError):
return default
def parse_duration(s):
if not isinstance(s, compat_basestring):
return None
s = s.strip()
days, hours, mins, secs, ms = [None] * 5
m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
if m:
days, hours, mins, secs, ms = m.groups()
else:
m = re.match(
r'''(?ix)(?:P?
(?:
[0-9]+\s*y(?:ears?)?\s*
)?
(?:
[0-9]+\s*m(?:onths?)?\s*
)?
(?:
[0-9]+\s*w(?:eeks?)?\s*
)?
(?:
(?P<days>[0-9]+)\s*d(?:ays?)?\s*
)?
T)?
(?:
(?P<hours>[0-9]+)\s*h(?:ours?)?\s*
)?
(?:
(?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
)?
(?:
(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
)?Z?$''', s)
if m:
days, hours, mins, secs, ms = m.groups()
else:
m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
if m:
hours, mins = m.groups()
else:
return None
duration = 0
if secs:
duration += float(secs)
if mins:
duration += float(mins) * 60
if hours:
duration += float(hours) * 60 * 60
if days:
duration += float(days) * 24 * 60 * 60
if ms:
duration += float(ms)
return duration
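# Illustrative usage (editor's sketch, not part of the upstream source; both
# clock-style and ISO-8601-like durations are accepted):
#   >>> parse_duration('1:23:45')
#   5025.0
#   >>> parse_duration('PT1H30M')
#   5400.0
#   >>> parse_duration('3 min')
#   180.0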
def prepend_extension(filename, ext, expected_real_ext=None):
name, real_ext = os.path.splitext(filename)
return (
'{0}.{1}{2}'.format(name, ext, real_ext)
if not expected_real_ext or real_ext[1:] == expected_real_ext
else '{0}.{1}'.format(filename, ext))
def replace_extension(filename, ext, expected_real_ext=None):
name, real_ext = os.path.splitext(filename)
return '{0}.{1}'.format(
name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
ext)
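# Illustrative usage (editor's sketch, not part of the upstream source):
#   >>> prepend_extension('video.mp4', 'temp')
#   'video.temp.mp4'
#   >>> replace_extension('video.mp4', 'mkv')
#   'video.mkv'
#   >>> prepend_extension('video.mp4', 'temp', expected_real_ext='webm')
#   'video.mp4.temp'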
def check_executable(exe, args=[]):
""" Checks if the given binary is installed somewhere in PATH, and returns its name.
args can be a list of arguments for a short output (like -version) """
try:
Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate_or_kill()
except OSError:
return False
return exe
def _get_exe_version_output(exe, args):
try:
# STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
# SIGTTOU if yt-dlp is run in the background.
# See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
out, _ = Popen(
[encodeArgument(exe)] + args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate_or_kill()
except OSError:
return False
if isinstance(out, bytes): # Python 2.x
out = out.decode('ascii', 'ignore')
return out
def detect_exe_version(output, version_re=None, unrecognized='present'):
assert isinstance(output, compat_str)
if version_re is None:
version_re = r'version\s+([-0-9._a-zA-Z]+)'
m = re.search(version_re, output)
if m:
return m.group(1)
else:
return unrecognized
def get_exe_version(exe, args=['--version'],
version_re=None, unrecognized='present'):
""" Returns the version of the specified executable,
or False if the executable is not present """
out = _get_exe_version_output(exe, args)
return detect_exe_version(out, version_re, unrecognized) if out else False
class LazyList(collections.abc.Sequence):
''' Lazy immutable list from an iterable
Note that slices of a LazyList are lists and not LazyList'''
class IndexError(IndexError):
pass
def __init__(self, iterable, *, reverse=False, _cache=None):
self.__iterable = iter(iterable)
self.__cache = [] if _cache is None else _cache
self.__reversed = reverse
def __iter__(self):
if self.__reversed:
# We need to consume the entire iterable to iterate in reverse
yield from self.exhaust()
return
yield from self.__cache
for item in self.__iterable:
self.__cache.append(item)
yield item
def __exhaust(self):
self.__cache.extend(self.__iterable)
# Discard the emptied iterable to make it pickle-able
self.__iterable = []
return self.__cache
def exhaust(self):
''' Evaluate the entire iterable '''
return self.__exhaust()[::-1 if self.__reversed else 1]
@staticmethod
def __reverse_index(x):
return None if x is None else -(x + 1)
def __getitem__(self, idx):
if isinstance(idx, slice):
if self.__reversed:
idx = slice(self.__reverse_index(idx.start), self.__reverse_index(idx.stop), -(idx.step or 1))
start, stop, step = idx.start, idx.stop, idx.step or 1
elif isinstance(idx, int):
if self.__reversed:
idx = self.__reverse_index(idx)
start, stop, step = idx, idx, 0
else:
raise TypeError('indices must be integers or slices')
if ((start or 0) < 0 or (stop or 0) < 0
or (start is None and step < 0)
or (stop is None and step > 0)):
# We need to consume the entire iterable to be able to slice from the end
# Obviously, never use this with infinite iterables
self.__exhaust()
try:
return self.__cache[idx]
except IndexError as e:
raise self.IndexError(e) from e
n = max(start or 0, stop or 0) - len(self.__cache) + 1
if n > 0:
self.__cache.extend(itertools.islice(self.__iterable, n))
try:
return self.__cache[idx]
except IndexError as e:
raise self.IndexError(e) from e
def __bool__(self):
try:
self[-1] if self.__reversed else self[0]
except self.IndexError:
return False
return True
def __len__(self):
self.__exhaust()
return len(self.__cache)
def __reversed__(self):
return type(self)(self.__iterable, reverse=not self.__reversed, _cache=self.__cache)
def __copy__(self):
return type(self)(self.__iterable, reverse=self.__reversed, _cache=self.__cache)
def __deepcopy__(self, memo):
# FIXME: This is actually just a shallow copy
id_ = id(self)
memo[id_] = self.__copy__()
return memo[id_]
def __repr__(self):
# repr and str should mimic a list. So we exhaust the iterable
return repr(self.exhaust())
def __str__(self):
return repr(self.exhaust())
class PagedList:
class IndexError(IndexError):
pass
def __len__(self):
# This is only useful for tests
return len(self.getslice())
def __init__(self, pagefunc, pagesize, use_cache=True):
self._pagefunc = pagefunc
self._pagesize = pagesize
self._use_cache = use_cache
self._cache = {}
def getpage(self, pagenum):
page_results = self._cache.get(pagenum)
if page_results is None:
page_results = list(self._pagefunc(pagenum))
if self._use_cache:
self._cache[pagenum] = page_results
return page_results
def getslice(self, start=0, end=None):
return list(self._getslice(start, end))
def _getslice(self, start, end):
raise NotImplementedError('This method must be implemented by subclasses')
def __getitem__(self, idx):
# NOTE: cache must be enabled if this is used
if not isinstance(idx, int) or idx < 0:
raise TypeError('indices must be non-negative integers')
entries = self.getslice(idx, idx + 1)
if not entries:
raise self.IndexError()
return entries[0]
class OnDemandPagedList(PagedList):
def _getslice(self, start, end):
for pagenum in itertools.count(start // self._pagesize):
firstid = pagenum * self._pagesize
nextfirstid = pagenum * self._pagesize + self._pagesize
if start >= nextfirstid:
continue
startv = (
start % self._pagesize
if firstid <= start < nextfirstid
else 0)
endv = (
((end - 1) % self._pagesize) + 1
if (end is not None and firstid <= end <= nextfirstid)
else None)
page_results = self.getpage(pagenum)
if startv != 0 or endv is not None:
page_results = page_results[startv:endv]
yield from page_results
            # A little optimization - if the current page is not "full", i.e. does
            # not contain page_size videos, then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
if len(page_results) + startv < self._pagesize:
break
# If we got the whole page, but the next page is not interesting,
# break out early as well
if end == nextfirstid:
break
class InAdvancePagedList(PagedList):
def __init__(self, pagefunc, pagecount, pagesize):
self._pagecount = pagecount
PagedList.__init__(self, pagefunc, pagesize, True)
def _getslice(self, start, end):
start_page = start // self._pagesize
end_page = (
self._pagecount if end is None else (end // self._pagesize + 1))
skip_elems = start - start_page * self._pagesize
only_more = None if end is None else end - start
for pagenum in range(start_page, end_page):
page_results = self.getpage(pagenum)
if skip_elems:
page_results = page_results[skip_elems:]
skip_elems = None
if only_more is not None:
if len(page_results) < only_more:
only_more -= len(page_results)
else:
yield from page_results[:only_more]
break
yield from page_results
def uppercase_escape(s):
unicode_escape = codecs.getdecoder('unicode_escape')
return re.sub(
r'\\U[0-9a-fA-F]{8}',
lambda m: unicode_escape(m.group(0))[0],
s)
def lowercase_escape(s):
unicode_escape = codecs.getdecoder('unicode_escape')
return re.sub(
r'\\u[0-9a-fA-F]{4}',
lambda m: unicode_escape(m.group(0))[0],
s)
def escape_rfc3986(s):
"""Escape non-ASCII characters as suggested by RFC 3986"""
if sys.version_info < (3, 0) and isinstance(s, compat_str):
s = s.encode('utf-8')
return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
def escape_url(url):
"""Escape URL as suggested by RFC 3986"""
url_parsed = compat_urllib_parse_urlparse(url)
return url_parsed._replace(
netloc=url_parsed.netloc.encode('idna').decode('ascii'),
path=escape_rfc3986(url_parsed.path),
params=escape_rfc3986(url_parsed.params),
query=escape_rfc3986(url_parsed.query),
fragment=escape_rfc3986(url_parsed.fragment)
).geturl()
def parse_qs(url):
return compat_parse_qs(compat_urllib_parse_urlparse(url).query)
def read_batch_urls(batch_fd):
def fixup(url):
if not isinstance(url, compat_str):
url = url.decode('utf-8', 'replace')
BOM_UTF8 = ('\xef\xbb\xbf', '\ufeff')
for bom in BOM_UTF8:
if url.startswith(bom):
url = url[len(bom):]
url = url.lstrip()
if not url or url.startswith(('#', ';', ']')):
return False
# "#" cannot be stripped out since it is part of the URI
        # However, it can be safely stripped out if it follows whitespace
return re.split(r'\s#', url, 1)[0].rstrip()
with contextlib.closing(batch_fd) as fd:
return [url for url in map(fixup, fd) if url]
def urlencode_postdata(*args, **kargs):
return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
def update_url_query(url, query):
if not query:
return url
parsed_url = compat_urlparse.urlparse(url)
qs = compat_parse_qs(parsed_url.query)
qs.update(query)
return compat_urlparse.urlunparse(parsed_url._replace(
query=compat_urllib_parse_urlencode(qs, True)))
def update_Request(req, url=None, data=None, headers={}, query={}):
req_headers = req.headers.copy()
req_headers.update(headers)
req_data = data or req.data
req_url = update_url_query(url or req.get_full_url(), query)
req_get_method = req.get_method()
if req_get_method == 'HEAD':
req_type = HEADRequest
elif req_get_method == 'PUT':
req_type = PUTRequest
else:
req_type = compat_urllib_request.Request
new_req = req_type(
req_url, data=req_data, headers=req_headers,
origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
if hasattr(req, 'timeout'):
new_req.timeout = req.timeout
return new_req
def _multipart_encode_impl(data, boundary):
content_type = 'multipart/form-data; boundary=%s' % boundary
out = b''
for k, v in data.items():
out += b'--' + boundary.encode('ascii') + b'\r\n'
if isinstance(k, compat_str):
k = k.encode('utf-8')
if isinstance(v, compat_str):
v = v.encode('utf-8')
# RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
# suggests sending UTF-8 directly. Firefox sends UTF-8, too
content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
if boundary.encode('ascii') in content:
raise ValueError('Boundary overlaps with data')
out += content
out += b'--' + boundary.encode('ascii') + b'--\r\n'
return out, content_type
def multipart_encode(data, boundary=None):
'''
Encode a dict to RFC 7578-compliant form-data
data:
A dict where keys and values can be either Unicode or bytes-like
objects.
boundary:
        If specified, it must be a Unicode object and is used as the boundary. Otherwise
a random boundary is generated.
Reference: https://tools.ietf.org/html/rfc7578
'''
has_specified_boundary = boundary is not None
while True:
if boundary is None:
boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
try:
out, content_type = _multipart_encode_impl(data, boundary)
break
except ValueError:
if has_specified_boundary:
raise
boundary = None
return out, content_type
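# Usage sketch (illustrative, not part of the original module): build a
# multipart body plus its matching Content-Type header. The field names and
# values below are hypothetical.
def _example_multipart_encode():
    body, content_type = multipart_encode({'username': 'alice', 'avatar': b'\x89PNG'})
    # body is the bytes payload to send; content_type carries the generated boundary
    assert content_type.startswith('multipart/form-data; boundary=')
    return body, content_type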
def dict_get(d, key_or_keys, default=None, skip_false_values=True):
if isinstance(key_or_keys, (list, tuple)):
for key in key_or_keys:
if key not in d or d[key] is None or skip_false_values and not d[key]:
continue
return d[key]
return default
return d.get(key_or_keys, default)
def try_get(src, getter, expected_type=None):
for get in variadic(getter):
try:
v = get(src)
except (AttributeError, KeyError, TypeError, IndexError):
pass
else:
if expected_type is None or isinstance(v, expected_type):
return v
def merge_dicts(*dicts):
merged = {}
for a_dict in dicts:
for k, v in a_dict.items():
if v is None:
continue
if (k not in merged
or (isinstance(v, compat_str) and v
and isinstance(merged[k], compat_str)
and not merged[k])):
merged[k] = v
return merged
def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
US_RATINGS = {
'G': 0,
'PG': 10,
'PG-13': 13,
'R': 16,
'NC': 18,
}
TV_PARENTAL_GUIDELINES = {
'TV-Y': 0,
'TV-Y7': 7,
'TV-G': 0,
'TV-PG': 0,
'TV-14': 14,
'TV-MA': 17,
}
def parse_age_limit(s):
if type(s) == int:
return s if 0 <= s <= 21 else None
if not isinstance(s, compat_basestring):
return None
m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
if m:
return int(m.group('age'))
s = s.upper()
if s in US_RATINGS:
return US_RATINGS[s]
m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
if m:
return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
return None
def strip_jsonp(code):
return re.sub(
r'''(?sx)^
(?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
(?:\s*&&\s*(?P=func_name))?
\s*\(\s*(?P<callback_data>.*)\);?
\s*?(?://[^\n]*)*$''',
r'\g<callback_data>', code)
def js_to_json(code, vars={}):
# vars is a dict of var, val pairs to substitute
COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
INTEGER_TABLE = (
(r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
(r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
)
def fix_kv(m):
v = m.group(0)
if v in ('true', 'false', 'null'):
return v
elif v in ('undefined', 'void 0'):
return 'null'
elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
return ""
if v[0] in ("'", '"'):
v = re.sub(r'(?s)\\.|"', lambda m: {
'"': '\\"',
"\\'": "'",
'\\\n': '',
'\\x': '\\u00',
}.get(m.group(0), m.group(0)), v[1:-1])
else:
for regex, base in INTEGER_TABLE:
im = re.match(regex, v)
if im:
i = int(im.group(1), base)
return '"%d":' % i if v.endswith(':') else '%d' % i
if v in vars:
return vars[v]
return '"%s"' % v
return re.sub(r'''(?sx)
"(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
'(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
{comment}|,(?={skip}[\]}}])|
void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
\b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
[0-9]+(?={skip}:)|
!+
'''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
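# Usage sketch (illustrative): turn a relaxed JavaScript object literal (unquoted
# keys, single quotes, a hex number, a trailing comma, undefined) into strict JSON.
# The sample literal is hypothetical; json is used elsewhere in this module, so it
# is assumed to be imported at the top.
def _example_js_to_json():
    js = "{title: 'demo', id: 0x1A, tags: ['a', 'b',], flag: undefined}"
    return json.loads(js_to_json(js))
    # -> {'title': 'demo', 'id': 26, 'tags': ['a', 'b'], 'flag': None}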
def qualities(quality_ids):
""" Get a numeric quality value out of a list of possible values """
def q(qid):
try:
return quality_ids.index(qid)
except ValueError:
return -1
return q
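# Usage sketch (illustrative): rank hypothetical format names by their position in
# a preference list; names missing from the list sort lowest (index -1).
def _example_qualities():
    q = qualities(['240p', '480p', '720p', '1080p'])
    return sorted(['1080p', 'unknown', '480p'], key=q)  # -> ['unknown', '480p', '1080p']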
DEFAULT_OUTTMPL = {
'default': '%(title)s [%(id)s].%(ext)s',
'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
}
OUTTMPL_TYPES = {
'chapter': None,
'subtitle': None,
'thumbnail': None,
'description': 'description',
'annotation': 'annotations.xml',
'infojson': 'info.json',
'link': None,
'pl_thumbnail': None,
'pl_description': 'description',
'pl_infojson': 'info.json',
}
# As of [1] format syntax is:
# %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
# 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
STR_FORMAT_RE_TMPL = r'''(?x)
(?<!%)(?P<prefix>(?:%%)*)
%
(?P<has_key>\((?P<key>{0})\))?
(?P<format>
(?P<conversion>[#0\-+ ]+)?
(?P<min_width>\d+)?
(?P<precision>\.\d+)?
(?P<len_mod>[hlL])? # unused in python
{1} # conversion type
)
'''
STR_FORMAT_TYPES = 'diouxXeEfFgGcrs'
def limit_length(s, length):
""" Add ellipses to overly long strings """
if s is None:
return None
ELLIPSES = '...'
if len(s) > length:
return s[:length - len(ELLIPSES)] + ELLIPSES
return s
def version_tuple(v):
return tuple(int(e) for e in re.split(r'[-.]', v))
def is_outdated_version(version, limit, assume_new=True):
if not version:
return not assume_new
try:
return version_tuple(version) < version_tuple(limit)
except ValueError:
return not assume_new
def ytdl_is_updateable():
""" Returns if yt-dlp can be updated with -U """
from .update import is_non_updateable
return not is_non_updateable()
def args_to_str(args):
# Get a short string representation for a subprocess command
return ' '.join(compat_shlex_quote(a) for a in args)
def error_to_compat_str(err):
err_str = str(err)
# On python 2 error byte string must be decoded with proper
# encoding rather than ascii
if sys.version_info[0] < 3:
err_str = err_str.decode(preferredencoding())
return err_str
def mimetype2ext(mt):
if mt is None:
return None
mt, _, params = mt.partition(';')
mt = mt.strip()
FULL_MAP = {
'audio/mp4': 'm4a',
# Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
# it's the most popular one
'audio/mpeg': 'mp3',
'audio/x-wav': 'wav',
'audio/wav': 'wav',
'audio/wave': 'wav',
}
ext = FULL_MAP.get(mt)
if ext is not None:
return ext
SUBTYPE_MAP = {
'3gpp': '3gp',
'smptett+xml': 'tt',
'ttaf+xml': 'dfxp',
'ttml+xml': 'ttml',
'x-flv': 'flv',
'x-mp4-fragmented': 'mp4',
'x-ms-sami': 'sami',
'x-ms-wmv': 'wmv',
'mpegurl': 'm3u8',
'x-mpegurl': 'm3u8',
'vnd.apple.mpegurl': 'm3u8',
'dash+xml': 'mpd',
'f4m+xml': 'f4m',
'hds+xml': 'f4m',
'vnd.ms-sstr+xml': 'ism',
'quicktime': 'mov',
'mp2t': 'ts',
'x-wav': 'wav',
'filmstrip+json': 'fs',
'svg+xml': 'svg',
}
_, _, subtype = mt.rpartition('/')
ext = SUBTYPE_MAP.get(subtype.lower())
if ext is not None:
return ext
SUFFIX_MAP = {
'json': 'json',
'xml': 'xml',
'zip': 'zip',
'gzip': 'gz',
}
_, _, suffix = subtype.partition('+')
ext = SUFFIX_MAP.get(suffix)
if ext is not None:
return ext
return subtype.replace('+', '.')
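# Usage sketch (illustrative): how a few MIME types map to extensions via the
# full-type, subtype and suffix tables above (the inputs are hypothetical).
def _example_mimetype2ext():
    return [mimetype2ext(mt) for mt in (
        'audio/mp4',                            # -> 'm4a' (full-type table)
        'application/dash+xml; charset=utf-8',  # -> 'mpd' (subtype table, params stripped)
        'video/unknown+thing',                  # -> 'unknown.thing' (fallback)
    )]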
def parse_codecs(codecs_str):
# http://tools.ietf.org/html/rfc6381
if not codecs_str:
return {}
split_codecs = list(filter(None, map(
str.strip, codecs_str.strip().strip(',').split(','))))
vcodec, acodec, hdr = None, None, None
for full_codec in split_codecs:
parts = full_codec.split('.')
codec = parts[0].replace('0', '')
if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
if not vcodec:
vcodec = '.'.join(parts[:4]) if codec in ('vp9', 'av1') else full_codec
if codec in ('dvh1', 'dvhe'):
hdr = 'DV'
elif codec == 'av1' and len(parts) > 3 and parts[3] == '10':
hdr = 'HDR10'
elif full_codec.replace('0', '').startswith('vp9.2'):
hdr = 'HDR10'
elif codec in ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
if not acodec:
acodec = full_codec
else:
write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
if not vcodec and not acodec:
if len(split_codecs) == 2:
return {
'vcodec': split_codecs[0],
'acodec': split_codecs[1],
}
else:
return {
'vcodec': vcodec or 'none',
'acodec': acodec or 'none',
'dynamic_range': hdr,
}
return {}
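# Usage sketch (illustrative): split a hypothetical RFC 6381 codecs string into
# its video and audio parts.
def _example_parse_codecs():
    return parse_codecs('avc1.640028,mp4a.40.2')
    # -> {'vcodec': 'avc1.640028', 'acodec': 'mp4a.40.2', 'dynamic_range': None}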
def urlhandle_detect_ext(url_handle):
getheader = url_handle.headers.get
cd = getheader('Content-Disposition')
if cd:
m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
if m:
e = determine_ext(m.group('filename'), default_ext=None)
if e:
return e
return mimetype2ext(getheader('Content-Type'))
def encode_data_uri(data, mime_type):
return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
def age_restricted(content_limit, age_limit):
""" Returns True iff the content should be blocked """
if age_limit is None: # No limit set
return False
if content_limit is None:
return False # Content available for everyone
return age_limit < content_limit
def is_html(first_bytes):
""" Detect whether a file contains HTML by examining its first bytes. """
BOMS = [
(b'\xef\xbb\xbf', 'utf-8'),
(b'\x00\x00\xfe\xff', 'utf-32-be'),
(b'\xff\xfe\x00\x00', 'utf-32-le'),
(b'\xff\xfe', 'utf-16-le'),
(b'\xfe\xff', 'utf-16-be'),
]
for bom, enc in BOMS:
if first_bytes.startswith(bom):
s = first_bytes[len(bom):].decode(enc, 'replace')
break
else:
s = first_bytes.decode('utf-8', 'replace')
return re.match(r'^\s*<', s)
def determine_protocol(info_dict):
protocol = info_dict.get('protocol')
if protocol is not None:
return protocol
url = sanitize_url(info_dict['url'])
if url.startswith('rtmp'):
return 'rtmp'
elif url.startswith('mms'):
return 'mms'
elif url.startswith('rtsp'):
return 'rtsp'
ext = determine_ext(url)
if ext == 'm3u8':
return 'm3u8'
elif ext == 'f4m':
return 'f4m'
return compat_urllib_parse_urlparse(url).scheme
def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False):
""" Render a list of rows, each as a list of values.
Text after a \t will be right aligned """
def width(string):
return len(remove_terminal_sequences(string).replace('\t', ''))
def get_max_lens(table):
return [max(width(str(v)) for v in col) for col in zip(*table)]
def filter_using_list(row, filterArray):
return [col for (take, col) in zip(filterArray, row) if take]
if hide_empty:
max_lens = get_max_lens(data)
header_row = filter_using_list(header_row, max_lens)
data = [filter_using_list(row, max_lens) for row in data]
table = [header_row] + data
max_lens = get_max_lens(table)
extra_gap += 1
if delim:
table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data
table[1][-1] = table[1][-1][:-extra_gap] # Remove extra_gap from end of delimiter
for row in table:
for pos, text in enumerate(map(str, row)):
if '\t' in text:
row[pos] = text.replace('\t', ' ' * (max_lens[pos] - width(text))) + ' ' * extra_gap
else:
row[pos] = text + ' ' * (max_lens[pos] - width(text) + extra_gap)
ret = '\n'.join(''.join(row).rstrip() for row in table)
return ret
def _match_one(filter_part, dct, incomplete):
# TODO: Generalize code with YoutubeDL._build_format_filter
STRING_OPERATORS = {
'*=': operator.contains,
'^=': lambda attr, value: attr.startswith(value),
'$=': lambda attr, value: attr.endswith(value),
'~=': lambda attr, value: re.search(value, attr),
}
COMPARISON_OPERATORS = {
**STRING_OPERATORS,
'<=': operator.le, # "<=" must be defined above "<"
'<': operator.lt,
'>=': operator.ge,
'>': operator.gt,
'=': operator.eq,
}
operator_rex = re.compile(r'''(?x)\s*
(?P<key>[a-z_]+)
\s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
(?:
(?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
(?P<strval>.+?)
)
\s*$
''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
m = operator_rex.search(filter_part)
if m:
m = m.groupdict()
unnegated_op = COMPARISON_OPERATORS[m['op']]
if m['negation']:
op = lambda attr, value: not unnegated_op(attr, value)
else:
op = unnegated_op
comparison_value = m['quotedstrval'] or m['strval'] or m['intval']
if m['quote']:
comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote'])
actual_value = dct.get(m['key'])
numeric_comparison = None
if isinstance(actual_value, compat_numeric_types):
            # If the original field is a string and matching comparison value is
# a number we should respect the origin of the original field
# and process comparison value as a string (see
# https://github.com/ytdl-org/youtube-dl/issues/11082)
try:
numeric_comparison = int(comparison_value)
except ValueError:
numeric_comparison = parse_filesize(comparison_value)
if numeric_comparison is None:
numeric_comparison = parse_filesize(f'{comparison_value}B')
if numeric_comparison is None:
numeric_comparison = parse_duration(comparison_value)
if numeric_comparison is not None and m['op'] in STRING_OPERATORS:
raise ValueError('Operator %s only supports string values!' % m['op'])
if actual_value is None:
return incomplete or m['none_inclusive']
return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)
UNARY_OPERATORS = {
'': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
'!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
}
operator_rex = re.compile(r'''(?x)\s*
(?P<op>%s)\s*(?P<key>[a-z_]+)
\s*$
''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
m = operator_rex.search(filter_part)
if m:
op = UNARY_OPERATORS[m.group('op')]
actual_value = dct.get(m.group('key'))
if incomplete and actual_value is None:
return True
return op(actual_value)
raise ValueError('Invalid filter part %r' % filter_part)
def match_str(filter_str, dct, incomplete=False):
""" Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false
When incomplete, all conditions passes on missing fields
"""
return all(
_match_one(filter_part.replace(r'\&', '&'), dct, incomplete)
for filter_part in re.split(r'(?<!\\)&', filter_str))
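# Usage sketch (illustrative): the same mini-language is used by --match-filter.
# The dict below stands in for a hypothetical info_dict excerpt.
def _example_match_str():
    info = {'like_count': 500, 'description': 'official video', 'is_live': False}
    return (match_str('like_count > 100 & description*=official & !is_live', info),
            match_str('like_count > 1000', info))  # -> (True, False)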
def match_filter_func(filter_str):
def _match_func(info_dict, *args, **kwargs):
if match_str(filter_str, info_dict, *args, **kwargs):
return None
else:
video_title = info_dict.get('title', info_dict.get('id', 'video'))
return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
return _match_func
def parse_dfxp_time_expr(time_expr):
if not time_expr:
return
mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
if mobj:
return float(mobj.group('time_offset'))
mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
if mobj:
return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
def srt_subtitles_timecode(seconds):
return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)
def ass_subtitles_timecode(seconds):
time = timetuple_from_msec(seconds * 1000)
return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)
def dfxp2srt(dfxp_data):
'''
@param dfxp_data A bytes-like object containing DFXP data
@returns A unicode object containing converted SRT data
'''
LEGACY_NAMESPACES = (
(b'http://www.w3.org/ns/ttml', [
b'http://www.w3.org/2004/11/ttaf1',
b'http://www.w3.org/2006/04/ttaf1',
b'http://www.w3.org/2006/10/ttaf1',
]),
(b'http://www.w3.org/ns/ttml#styling', [
b'http://www.w3.org/ns/ttml#style',
]),
)
SUPPORTED_STYLING = [
'color',
'fontFamily',
'fontSize',
'fontStyle',
'fontWeight',
'textDecoration'
]
_x = functools.partial(xpath_with_ns, ns_map={
'xml': 'http://www.w3.org/XML/1998/namespace',
'ttml': 'http://www.w3.org/ns/ttml',
'tts': 'http://www.w3.org/ns/ttml#styling',
})
styles = {}
default_style = {}
class TTMLPElementParser(object):
_out = ''
_unclosed_elements = []
_applied_styles = []
def start(self, tag, attrib):
if tag in (_x('ttml:br'), 'br'):
self._out += '\n'
else:
unclosed_elements = []
style = {}
element_style_id = attrib.get('style')
if default_style:
style.update(default_style)
if element_style_id:
style.update(styles.get(element_style_id, {}))
for prop in SUPPORTED_STYLING:
prop_val = attrib.get(_x('tts:' + prop))
if prop_val:
style[prop] = prop_val
if style:
font = ''
for k, v in sorted(style.items()):
if self._applied_styles and self._applied_styles[-1].get(k) == v:
continue
if k == 'color':
font += ' color="%s"' % v
elif k == 'fontSize':
font += ' size="%s"' % v
elif k == 'fontFamily':
font += ' face="%s"' % v
elif k == 'fontWeight' and v == 'bold':
self._out += '<b>'
unclosed_elements.append('b')
elif k == 'fontStyle' and v == 'italic':
self._out += '<i>'
unclosed_elements.append('i')
elif k == 'textDecoration' and v == 'underline':
self._out += '<u>'
unclosed_elements.append('u')
if font:
self._out += '<font' + font + '>'
unclosed_elements.append('font')
applied_style = {}
if self._applied_styles:
applied_style.update(self._applied_styles[-1])
applied_style.update(style)
self._applied_styles.append(applied_style)
self._unclosed_elements.append(unclosed_elements)
def end(self, tag):
if tag not in (_x('ttml:br'), 'br'):
unclosed_elements = self._unclosed_elements.pop()
for element in reversed(unclosed_elements):
self._out += '</%s>' % element
if unclosed_elements and self._applied_styles:
self._applied_styles.pop()
def data(self, data):
self._out += data
def close(self):
return self._out.strip()
def parse_node(node):
target = TTMLPElementParser()
parser = xml.etree.ElementTree.XMLParser(target=target)
parser.feed(xml.etree.ElementTree.tostring(node))
return parser.close()
for k, v in LEGACY_NAMESPACES:
for ns in v:
dfxp_data = dfxp_data.replace(ns, k)
dfxp = compat_etree_fromstring(dfxp_data)
out = []
paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
if not paras:
raise ValueError('Invalid dfxp/TTML subtitle')
repeat = False
while True:
for style in dfxp.findall(_x('.//ttml:style')):
style_id = style.get('id') or style.get(_x('xml:id'))
if not style_id:
continue
parent_style_id = style.get('style')
if parent_style_id:
if parent_style_id not in styles:
repeat = True
continue
styles[style_id] = styles[parent_style_id].copy()
for prop in SUPPORTED_STYLING:
prop_val = style.get(_x('tts:' + prop))
if prop_val:
styles.setdefault(style_id, {})[prop] = prop_val
if repeat:
repeat = False
else:
break
for p in ('body', 'div'):
ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
if ele is None:
continue
style = styles.get(ele.get('style'))
if not style:
continue
default_style.update(style)
for para, index in zip(paras, itertools.count(1)):
begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
end_time = parse_dfxp_time_expr(para.attrib.get('end'))
dur = parse_dfxp_time_expr(para.attrib.get('dur'))
if begin_time is None:
continue
if not end_time:
if not dur:
continue
end_time = begin_time + dur
out.append('%d\n%s --> %s\n%s\n\n' % (
index,
srt_subtitles_timecode(begin_time),
srt_subtitles_timecode(end_time),
parse_node(para)))
return ''.join(out)
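# Usage sketch (illustrative): convert a minimal, hypothetical TTML/DFXP document
# into SRT. Real documents usually carry styling, which is handled above.
def _example_dfxp2srt():
    dfxp = (b'<tt xmlns="http://www.w3.org/ns/ttml"><body><div>'
            b'<p begin="0.0" end="1.5">Hello</p></div></body></tt>')
    return dfxp2srt(dfxp)  # -> '1\n00:00:00,000 --> 00:00:01,500\nHello\n\n'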
def cli_option(params, command_option, param):
param = params.get(param)
if param:
param = compat_str(param)
return [command_option, param] if param is not None else []
def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
param = params.get(param)
if param is None:
return []
assert isinstance(param, bool)
if separator:
return [command_option + separator + (true_value if param else false_value)]
return [command_option, true_value if param else false_value]
def cli_valueless_option(params, command_option, param, expected_value=True):
param = params.get(param)
return [command_option] if param == expected_value else []
def cli_configuration_args(argdict, keys, default=[], use_compat=True):
if isinstance(argdict, (list, tuple)): # for backward compatibility
if use_compat:
return argdict
else:
argdict = None
if argdict is None:
return default
assert isinstance(argdict, dict)
assert isinstance(keys, (list, tuple))
for key_list in keys:
arg_list = list(filter(
lambda x: x is not None,
[argdict.get(key.lower()) for key in variadic(key_list)]))
if arg_list:
return [arg for args in arg_list for arg in args]
return default
def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compat=True):
main_key, exe = main_key.lower(), exe.lower()
root_key = exe if main_key == exe else f'{main_key}+{exe}'
keys = [f'{root_key}{k}' for k in (keys or [''])]
if root_key in keys:
if main_key != exe:
keys.append((main_key, exe))
keys.append('default')
else:
use_compat = False
return cli_configuration_args(argdict, keys, default, use_compat)
class ISO639Utils(object):
# See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
_lang_map = {
'aa': 'aar',
'ab': 'abk',
'ae': 'ave',
'af': 'afr',
'ak': 'aka',
'am': 'amh',
'an': 'arg',
'ar': 'ara',
'as': 'asm',
'av': 'ava',
'ay': 'aym',
'az': 'aze',
'ba': 'bak',
'be': 'bel',
'bg': 'bul',
'bh': 'bih',
'bi': 'bis',
'bm': 'bam',
'bn': 'ben',
'bo': 'bod',
'br': 'bre',
'bs': 'bos',
'ca': 'cat',
'ce': 'che',
'ch': 'cha',
'co': 'cos',
'cr': 'cre',
'cs': 'ces',
'cu': 'chu',
'cv': 'chv',
'cy': 'cym',
'da': 'dan',
'de': 'deu',
'dv': 'div',
'dz': 'dzo',
'ee': 'ewe',
'el': 'ell',
'en': 'eng',
'eo': 'epo',
'es': 'spa',
'et': 'est',
'eu': 'eus',
'fa': 'fas',
'ff': 'ful',
'fi': 'fin',
'fj': 'fij',
'fo': 'fao',
'fr': 'fra',
'fy': 'fry',
'ga': 'gle',
'gd': 'gla',
'gl': 'glg',
'gn': 'grn',
'gu': 'guj',
'gv': 'glv',
'ha': 'hau',
'he': 'heb',
'iw': 'heb', # Replaced by he in 1989 revision
'hi': 'hin',
'ho': 'hmo',
'hr': 'hrv',
'ht': 'hat',
'hu': 'hun',
'hy': 'hye',
'hz': 'her',
'ia': 'ina',
'id': 'ind',
'in': 'ind', # Replaced by id in 1989 revision
'ie': 'ile',
'ig': 'ibo',
'ii': 'iii',
'ik': 'ipk',
'io': 'ido',
'is': 'isl',
'it': 'ita',
'iu': 'iku',
'ja': 'jpn',
'jv': 'jav',
'ka': 'kat',
'kg': 'kon',
'ki': 'kik',
'kj': 'kua',
'kk': 'kaz',
'kl': 'kal',
'km': 'khm',
'kn': 'kan',
'ko': 'kor',
'kr': 'kau',
'ks': 'kas',
'ku': 'kur',
'kv': 'kom',
'kw': 'cor',
'ky': 'kir',
'la': 'lat',
'lb': 'ltz',
'lg': 'lug',
'li': 'lim',
'ln': 'lin',
'lo': 'lao',
'lt': 'lit',
'lu': 'lub',
'lv': 'lav',
'mg': 'mlg',
'mh': 'mah',
'mi': 'mri',
'mk': 'mkd',
'ml': 'mal',
'mn': 'mon',
'mr': 'mar',
'ms': 'msa',
'mt': 'mlt',
'my': 'mya',
'na': 'nau',
'nb': 'nob',
'nd': 'nde',
'ne': 'nep',
'ng': 'ndo',
'nl': 'nld',
'nn': 'nno',
'no': 'nor',
'nr': 'nbl',
'nv': 'nav',
'ny': 'nya',
'oc': 'oci',
'oj': 'oji',
'om': 'orm',
'or': 'ori',
'os': 'oss',
'pa': 'pan',
'pi': 'pli',
'pl': 'pol',
'ps': 'pus',
'pt': 'por',
'qu': 'que',
'rm': 'roh',
'rn': 'run',
'ro': 'ron',
'ru': 'rus',
'rw': 'kin',
'sa': 'san',
'sc': 'srd',
'sd': 'snd',
'se': 'sme',
'sg': 'sag',
'si': 'sin',
'sk': 'slk',
'sl': 'slv',
'sm': 'smo',
'sn': 'sna',
'so': 'som',
'sq': 'sqi',
'sr': 'srp',
'ss': 'ssw',
'st': 'sot',
'su': 'sun',
'sv': 'swe',
'sw': 'swa',
'ta': 'tam',
'te': 'tel',
'tg': 'tgk',
'th': 'tha',
'ti': 'tir',
'tk': 'tuk',
'tl': 'tgl',
'tn': 'tsn',
'to': 'ton',
'tr': 'tur',
'ts': 'tso',
'tt': 'tat',
'tw': 'twi',
'ty': 'tah',
'ug': 'uig',
'uk': 'ukr',
'ur': 'urd',
'uz': 'uzb',
've': 'ven',
'vi': 'vie',
'vo': 'vol',
'wa': 'wln',
'wo': 'wol',
'xh': 'xho',
'yi': 'yid',
'ji': 'yid', # Replaced by yi in 1989 revision
'yo': 'yor',
'za': 'zha',
'zh': 'zho',
'zu': 'zul',
}
@classmethod
def short2long(cls, code):
"""Convert language code from ISO 639-1 to ISO 639-2/T"""
return cls._lang_map.get(code[:2])
@classmethod
def long2short(cls, code):
"""Convert language code from ISO 639-2/T to ISO 639-1"""
for short_name, long_name in cls._lang_map.items():
if long_name == code:
return short_name
class ISO3166Utils(object):
# From http://data.okfn.org/data/core/country-list
_country_map = {
'AF': 'Afghanistan',
'AX': 'Åland Islands',
'AL': 'Albania',
'DZ': 'Algeria',
'AS': 'American Samoa',
'AD': 'Andorra',
'AO': 'Angola',
'AI': 'Anguilla',
'AQ': 'Antarctica',
'AG': 'Antigua and Barbuda',
'AR': 'Argentina',
'AM': 'Armenia',
'AW': 'Aruba',
'AU': 'Australia',
'AT': 'Austria',
'AZ': 'Azerbaijan',
'BS': 'Bahamas',
'BH': 'Bahrain',
'BD': 'Bangladesh',
'BB': 'Barbados',
'BY': 'Belarus',
'BE': 'Belgium',
'BZ': 'Belize',
'BJ': 'Benin',
'BM': 'Bermuda',
'BT': 'Bhutan',
'BO': 'Bolivia, Plurinational State of',
'BQ': 'Bonaire, Sint Eustatius and Saba',
'BA': 'Bosnia and Herzegovina',
'BW': 'Botswana',
'BV': 'Bouvet Island',
'BR': 'Brazil',
'IO': 'British Indian Ocean Territory',
'BN': 'Brunei Darussalam',
'BG': 'Bulgaria',
'BF': 'Burkina Faso',
'BI': 'Burundi',
'KH': 'Cambodia',
'CM': 'Cameroon',
'CA': 'Canada',
'CV': 'Cape Verde',
'KY': 'Cayman Islands',
'CF': 'Central African Republic',
'TD': 'Chad',
'CL': 'Chile',
'CN': 'China',
'CX': 'Christmas Island',
'CC': 'Cocos (Keeling) Islands',
'CO': 'Colombia',
'KM': 'Comoros',
'CG': 'Congo',
'CD': 'Congo, the Democratic Republic of the',
'CK': 'Cook Islands',
'CR': 'Costa Rica',
'CI': 'Côte d\'Ivoire',
'HR': 'Croatia',
'CU': 'Cuba',
'CW': 'Curaçao',
'CY': 'Cyprus',
'CZ': 'Czech Republic',
'DK': 'Denmark',
'DJ': 'Djibouti',
'DM': 'Dominica',
'DO': 'Dominican Republic',
'EC': 'Ecuador',
'EG': 'Egypt',
'SV': 'El Salvador',
'GQ': 'Equatorial Guinea',
'ER': 'Eritrea',
'EE': 'Estonia',
'ET': 'Ethiopia',
'FK': 'Falkland Islands (Malvinas)',
'FO': 'Faroe Islands',
'FJ': 'Fiji',
'FI': 'Finland',
'FR': 'France',
'GF': 'French Guiana',
'PF': 'French Polynesia',
'TF': 'French Southern Territories',
'GA': 'Gabon',
'GM': 'Gambia',
'GE': 'Georgia',
'DE': 'Germany',
'GH': 'Ghana',
'GI': 'Gibraltar',
'GR': 'Greece',
'GL': 'Greenland',
'GD': 'Grenada',
'GP': 'Guadeloupe',
'GU': 'Guam',
'GT': 'Guatemala',
'GG': 'Guernsey',
'GN': 'Guinea',
'GW': 'Guinea-Bissau',
'GY': 'Guyana',
'HT': 'Haiti',
'HM': 'Heard Island and McDonald Islands',
'VA': 'Holy See (Vatican City State)',
'HN': 'Honduras',
'HK': 'Hong Kong',
'HU': 'Hungary',
'IS': 'Iceland',
'IN': 'India',
'ID': 'Indonesia',
'IR': 'Iran, Islamic Republic of',
'IQ': 'Iraq',
'IE': 'Ireland',
'IM': 'Isle of Man',
'IL': 'Israel',
'IT': 'Italy',
'JM': 'Jamaica',
'JP': 'Japan',
'JE': 'Jersey',
'JO': 'Jordan',
'KZ': 'Kazakhstan',
'KE': 'Kenya',
'KI': 'Kiribati',
'KP': 'Korea, Democratic People\'s Republic of',
'KR': 'Korea, Republic of',
'KW': 'Kuwait',
'KG': 'Kyrgyzstan',
'LA': 'Lao People\'s Democratic Republic',
'LV': 'Latvia',
'LB': 'Lebanon',
'LS': 'Lesotho',
'LR': 'Liberia',
'LY': 'Libya',
'LI': 'Liechtenstein',
'LT': 'Lithuania',
'LU': 'Luxembourg',
'MO': 'Macao',
'MK': 'Macedonia, the Former Yugoslav Republic of',
'MG': 'Madagascar',
'MW': 'Malawi',
'MY': 'Malaysia',
'MV': 'Maldives',
'ML': 'Mali',
'MT': 'Malta',
'MH': 'Marshall Islands',
'MQ': 'Martinique',
'MR': 'Mauritania',
'MU': 'Mauritius',
'YT': 'Mayotte',
'MX': 'Mexico',
'FM': 'Micronesia, Federated States of',
'MD': 'Moldova, Republic of',
'MC': 'Monaco',
'MN': 'Mongolia',
'ME': 'Montenegro',
'MS': 'Montserrat',
'MA': 'Morocco',
'MZ': 'Mozambique',
'MM': 'Myanmar',
'NA': 'Namibia',
'NR': 'Nauru',
'NP': 'Nepal',
'NL': 'Netherlands',
'NC': 'New Caledonia',
'NZ': 'New Zealand',
'NI': 'Nicaragua',
'NE': 'Niger',
'NG': 'Nigeria',
'NU': 'Niue',
'NF': 'Norfolk Island',
'MP': 'Northern Mariana Islands',
'NO': 'Norway',
'OM': 'Oman',
'PK': 'Pakistan',
'PW': 'Palau',
'PS': 'Palestine, State of',
'PA': 'Panama',
'PG': 'Papua New Guinea',
'PY': 'Paraguay',
'PE': 'Peru',
'PH': 'Philippines',
'PN': 'Pitcairn',
'PL': 'Poland',
'PT': 'Portugal',
'PR': 'Puerto Rico',
'QA': 'Qatar',
'RE': 'Réunion',
'RO': 'Romania',
'RU': 'Russian Federation',
'RW': 'Rwanda',
'BL': 'Saint Barthélemy',
'SH': 'Saint Helena, Ascension and Tristan da Cunha',
'KN': 'Saint Kitts and Nevis',
'LC': 'Saint Lucia',
'MF': 'Saint Martin (French part)',
'PM': 'Saint Pierre and Miquelon',
'VC': 'Saint Vincent and the Grenadines',
'WS': 'Samoa',
'SM': 'San Marino',
'ST': 'Sao Tome and Principe',
'SA': 'Saudi Arabia',
'SN': 'Senegal',
'RS': 'Serbia',
'SC': 'Seychelles',
'SL': 'Sierra Leone',
'SG': 'Singapore',
'SX': 'Sint Maarten (Dutch part)',
'SK': 'Slovakia',
'SI': 'Slovenia',
'SB': 'Solomon Islands',
'SO': 'Somalia',
'ZA': 'South Africa',
'GS': 'South Georgia and the South Sandwich Islands',
'SS': 'South Sudan',
'ES': 'Spain',
'LK': 'Sri Lanka',
'SD': 'Sudan',
'SR': 'Suriname',
'SJ': 'Svalbard and Jan Mayen',
'SZ': 'Swaziland',
'SE': 'Sweden',
'CH': 'Switzerland',
'SY': 'Syrian Arab Republic',
'TW': 'Taiwan, Province of China',
'TJ': 'Tajikistan',
'TZ': 'Tanzania, United Republic of',
'TH': 'Thailand',
'TL': 'Timor-Leste',
'TG': 'Togo',
'TK': 'Tokelau',
'TO': 'Tonga',
'TT': 'Trinidad and Tobago',
'TN': 'Tunisia',
'TR': 'Turkey',
'TM': 'Turkmenistan',
'TC': 'Turks and Caicos Islands',
'TV': 'Tuvalu',
'UG': 'Uganda',
'UA': 'Ukraine',
'AE': 'United Arab Emirates',
'GB': 'United Kingdom',
'US': 'United States',
'UM': 'United States Minor Outlying Islands',
'UY': 'Uruguay',
'UZ': 'Uzbekistan',
'VU': 'Vanuatu',
'VE': 'Venezuela, Bolivarian Republic of',
'VN': 'Viet Nam',
'VG': 'Virgin Islands, British',
'VI': 'Virgin Islands, U.S.',
'WF': 'Wallis and Futuna',
'EH': 'Western Sahara',
'YE': 'Yemen',
'ZM': 'Zambia',
'ZW': 'Zimbabwe',
}
@classmethod
def short2full(cls, code):
"""Convert an ISO 3166-2 country code to the corresponding full name"""
return cls._country_map.get(code.upper())
class GeoUtils(object):
# Major IPv4 address blocks per country
_country_ip_map = {
'AD': '46.172.224.0/19',
'AE': '94.200.0.0/13',
'AF': '149.54.0.0/17',
'AG': '209.59.64.0/18',
'AI': '204.14.248.0/21',
'AL': '46.99.0.0/16',
'AM': '46.70.0.0/15',
'AO': '105.168.0.0/13',
'AP': '182.50.184.0/21',
'AQ': '23.154.160.0/24',
'AR': '181.0.0.0/12',
'AS': '202.70.112.0/20',
'AT': '77.116.0.0/14',
'AU': '1.128.0.0/11',
'AW': '181.41.0.0/18',
'AX': '185.217.4.0/22',
'AZ': '5.197.0.0/16',
'BA': '31.176.128.0/17',
'BB': '65.48.128.0/17',
'BD': '114.130.0.0/16',
'BE': '57.0.0.0/8',
'BF': '102.178.0.0/15',
'BG': '95.42.0.0/15',
'BH': '37.131.0.0/17',
'BI': '154.117.192.0/18',
'BJ': '137.255.0.0/16',
'BL': '185.212.72.0/23',
'BM': '196.12.64.0/18',
'BN': '156.31.0.0/16',
'BO': '161.56.0.0/16',
'BQ': '161.0.80.0/20',
'BR': '191.128.0.0/12',
'BS': '24.51.64.0/18',
'BT': '119.2.96.0/19',
'BW': '168.167.0.0/16',
'BY': '178.120.0.0/13',
'BZ': '179.42.192.0/18',
'CA': '99.224.0.0/11',
'CD': '41.243.0.0/16',
'CF': '197.242.176.0/21',
'CG': '160.113.0.0/16',
'CH': '85.0.0.0/13',
'CI': '102.136.0.0/14',
'CK': '202.65.32.0/19',
'CL': '152.172.0.0/14',
'CM': '102.244.0.0/14',
'CN': '36.128.0.0/10',
'CO': '181.240.0.0/12',
'CR': '201.192.0.0/12',
'CU': '152.206.0.0/15',
'CV': '165.90.96.0/19',
'CW': '190.88.128.0/17',
'CY': '31.153.0.0/16',
'CZ': '88.100.0.0/14',
'DE': '53.0.0.0/8',
'DJ': '197.241.0.0/17',
'DK': '87.48.0.0/12',
'DM': '192.243.48.0/20',
'DO': '152.166.0.0/15',
'DZ': '41.96.0.0/12',
'EC': '186.68.0.0/15',
'EE': '90.190.0.0/15',
'EG': '156.160.0.0/11',
'ER': '196.200.96.0/20',
'ES': '88.0.0.0/11',
'ET': '196.188.0.0/14',
'EU': '2.16.0.0/13',
'FI': '91.152.0.0/13',
'FJ': '144.120.0.0/16',
'FK': '80.73.208.0/21',
'FM': '119.252.112.0/20',
'FO': '88.85.32.0/19',
'FR': '90.0.0.0/9',
'GA': '41.158.0.0/15',
'GB': '25.0.0.0/8',
'GD': '74.122.88.0/21',
'GE': '31.146.0.0/16',
'GF': '161.22.64.0/18',
'GG': '62.68.160.0/19',
'GH': '154.160.0.0/12',
'GI': '95.164.0.0/16',
'GL': '88.83.0.0/19',
'GM': '160.182.0.0/15',
'GN': '197.149.192.0/18',
'GP': '104.250.0.0/19',
'GQ': '105.235.224.0/20',
'GR': '94.64.0.0/13',
'GT': '168.234.0.0/16',
'GU': '168.123.0.0/16',
'GW': '197.214.80.0/20',
'GY': '181.41.64.0/18',
'HK': '113.252.0.0/14',
'HN': '181.210.0.0/16',
'HR': '93.136.0.0/13',
'HT': '148.102.128.0/17',
'HU': '84.0.0.0/14',
'ID': '39.192.0.0/10',
'IE': '87.32.0.0/12',
'IL': '79.176.0.0/13',
'IM': '5.62.80.0/20',
'IN': '117.192.0.0/10',
'IO': '203.83.48.0/21',
'IQ': '37.236.0.0/14',
'IR': '2.176.0.0/12',
'IS': '82.221.0.0/16',
'IT': '79.0.0.0/10',
'JE': '87.244.64.0/18',
'JM': '72.27.0.0/17',
'JO': '176.29.0.0/16',
'JP': '133.0.0.0/8',
'KE': '105.48.0.0/12',
'KG': '158.181.128.0/17',
'KH': '36.37.128.0/17',
'KI': '103.25.140.0/22',
'KM': '197.255.224.0/20',
'KN': '198.167.192.0/19',
'KP': '175.45.176.0/22',
'KR': '175.192.0.0/10',
'KW': '37.36.0.0/14',
'KY': '64.96.0.0/15',
'KZ': '2.72.0.0/13',
'LA': '115.84.64.0/18',
'LB': '178.135.0.0/16',
'LC': '24.92.144.0/20',
'LI': '82.117.0.0/19',
'LK': '112.134.0.0/15',
'LR': '102.183.0.0/16',
'LS': '129.232.0.0/17',
'LT': '78.56.0.0/13',
'LU': '188.42.0.0/16',
'LV': '46.109.0.0/16',
'LY': '41.252.0.0/14',
'MA': '105.128.0.0/11',
'MC': '88.209.64.0/18',
'MD': '37.246.0.0/16',
'ME': '178.175.0.0/17',
'MF': '74.112.232.0/21',
'MG': '154.126.0.0/17',
'MH': '117.103.88.0/21',
'MK': '77.28.0.0/15',
'ML': '154.118.128.0/18',
'MM': '37.111.0.0/17',
'MN': '49.0.128.0/17',
'MO': '60.246.0.0/16',
'MP': '202.88.64.0/20',
'MQ': '109.203.224.0/19',
'MR': '41.188.64.0/18',
'MS': '208.90.112.0/22',
'MT': '46.11.0.0/16',
'MU': '105.16.0.0/12',
'MV': '27.114.128.0/18',
'MW': '102.70.0.0/15',
'MX': '187.192.0.0/11',
'MY': '175.136.0.0/13',
'MZ': '197.218.0.0/15',
'NA': '41.182.0.0/16',
'NC': '101.101.0.0/18',
'NE': '197.214.0.0/18',
'NF': '203.17.240.0/22',
'NG': '105.112.0.0/12',
'NI': '186.76.0.0/15',
'NL': '145.96.0.0/11',
'NO': '84.208.0.0/13',
'NP': '36.252.0.0/15',
'NR': '203.98.224.0/19',
'NU': '49.156.48.0/22',
'NZ': '49.224.0.0/14',
'OM': '5.36.0.0/15',
'PA': '186.72.0.0/15',
'PE': '186.160.0.0/14',
'PF': '123.50.64.0/18',
'PG': '124.240.192.0/19',
'PH': '49.144.0.0/13',
'PK': '39.32.0.0/11',
'PL': '83.0.0.0/11',
'PM': '70.36.0.0/20',
'PR': '66.50.0.0/16',
'PS': '188.161.0.0/16',
'PT': '85.240.0.0/13',
'PW': '202.124.224.0/20',
'PY': '181.120.0.0/14',
'QA': '37.210.0.0/15',
'RE': '102.35.0.0/16',
'RO': '79.112.0.0/13',
'RS': '93.86.0.0/15',
'RU': '5.136.0.0/13',
'RW': '41.186.0.0/16',
'SA': '188.48.0.0/13',
'SB': '202.1.160.0/19',
'SC': '154.192.0.0/11',
'SD': '102.120.0.0/13',
'SE': '78.64.0.0/12',
'SG': '8.128.0.0/10',
'SI': '188.196.0.0/14',
'SK': '78.98.0.0/15',
'SL': '102.143.0.0/17',
'SM': '89.186.32.0/19',
'SN': '41.82.0.0/15',
'SO': '154.115.192.0/18',
'SR': '186.179.128.0/17',
'SS': '105.235.208.0/21',
'ST': '197.159.160.0/19',
'SV': '168.243.0.0/16',
'SX': '190.102.0.0/20',
'SY': '5.0.0.0/16',
'SZ': '41.84.224.0/19',
'TC': '65.255.48.0/20',
'TD': '154.68.128.0/19',
'TG': '196.168.0.0/14',
'TH': '171.96.0.0/13',
'TJ': '85.9.128.0/18',
'TK': '27.96.24.0/21',
'TL': '180.189.160.0/20',
'TM': '95.85.96.0/19',
'TN': '197.0.0.0/11',
'TO': '175.176.144.0/21',
'TR': '78.160.0.0/11',
'TT': '186.44.0.0/15',
'TV': '202.2.96.0/19',
'TW': '120.96.0.0/11',
'TZ': '156.156.0.0/14',
'UA': '37.52.0.0/14',
'UG': '102.80.0.0/13',
'US': '6.0.0.0/8',
'UY': '167.56.0.0/13',
'UZ': '84.54.64.0/18',
'VA': '212.77.0.0/19',
'VC': '207.191.240.0/21',
'VE': '186.88.0.0/13',
'VG': '66.81.192.0/20',
'VI': '146.226.0.0/16',
'VN': '14.160.0.0/11',
'VU': '202.80.32.0/20',
'WF': '117.20.32.0/21',
'WS': '202.4.32.0/19',
'YE': '134.35.0.0/16',
'YT': '41.242.116.0/22',
'ZA': '41.0.0.0/11',
'ZM': '102.144.0.0/13',
'ZW': '102.177.192.0/18',
}
@classmethod
def random_ipv4(cls, code_or_block):
if len(code_or_block) == 2:
block = cls._country_ip_map.get(code_or_block.upper())
if not block:
return None
else:
block = code_or_block
addr, preflen = block.split('/')
addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
addr_max = addr_min | (0xffffffff >> int(preflen))
return compat_str(socket.inet_ntoa(
compat_struct_pack('!L', random.randint(addr_min, addr_max))))
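# Usage sketch (illustrative): pick a pseudo-random IPv4 address either from a
# country's block (two-letter ISO 3166-2 code) or from an explicit CIDR block.
def _example_random_ipv4():
    return GeoUtils.random_ipv4('DE'), GeoUtils.random_ipv4('192.0.2.0/24')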
class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
def __init__(self, proxies=None):
# Set default handlers
for type in ('http', 'https'):
setattr(self, '%s_open' % type,
lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
meth(r, proxy, type))
compat_urllib_request.ProxyHandler.__init__(self, proxies)
def proxy_open(self, req, proxy, type):
req_proxy = req.headers.get('Ytdl-request-proxy')
if req_proxy is not None:
proxy = req_proxy
del req.headers['Ytdl-request-proxy']
if proxy == '__noproxy__':
return None # No Proxy
if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
req.add_header('Ytdl-socks-proxy', proxy)
            # yt-dlp's http/https handlers take care of wrapping the socket with SOCKS
return None
return compat_urllib_request.ProxyHandler.proxy_open(
self, req, proxy, type)
# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
# released into Public Domain
# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
def long_to_bytes(n, blocksize=0):
"""long_to_bytes(n:long, blocksize:int) : string
Convert a long integer to a byte string.
If optional blocksize is given and greater than zero, pad the front of the
byte string with binary zeros so that the length is a multiple of
blocksize.
"""
# after much testing, this algorithm was deemed to be the fastest
s = b''
n = int(n)
while n > 0:
s = compat_struct_pack('>I', n & 0xffffffff) + s
n = n >> 32
# strip off leading zeros
for i in range(len(s)):
if s[i] != b'\000'[0]:
break
else:
# only happens when n == 0
s = b'\000'
i = 0
s = s[i:]
# add back some pad bytes. this could be done more efficiently w.r.t. the
# de-padding being done above, but sigh...
if blocksize > 0 and len(s) % blocksize:
s = (blocksize - len(s) % blocksize) * b'\000' + s
return s
def bytes_to_long(s):
"""bytes_to_long(string) : long
Convert a byte string to a long integer.
This is (essentially) the inverse of long_to_bytes().
"""
acc = 0
length = len(s)
if length % 4:
extra = (4 - length % 4)
s = b'\000' * extra + s
length = length + extra
for i in range(0, length, 4):
acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
return acc
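# Usage sketch (illustrative): the two helpers above are inverses for positive
# integers; blocksize left-pads the byte string with NUL bytes.
def _example_long_bytes_roundtrip():
    n = 0xdeadbeefcafe
    assert bytes_to_long(long_to_bytes(n)) == n
    return long_to_bytes(n, blocksize=8)  # -> b'\x00\x00\xde\xad\xbe\xef\xca\xfe'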
def ohdave_rsa_encrypt(data, exponent, modulus):
'''
Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
Input:
data: data to encrypt, bytes-like object
        exponent, modulus: parameters e and N of the RSA algorithm, both integers
Output: hex string of encrypted data
Limitation: supports one block encryption only
'''
payload = int(binascii.hexlify(data[::-1]), 16)
encrypted = pow(payload, exponent, modulus)
return '%x' % encrypted
def pkcs1pad(data, length):
"""
Padding input data with PKCS#1 scheme
@param {int[]} data input data
@param {int} length target length
@returns {int[]} padded data
"""
if len(data) > length - 11:
raise ValueError('Input data too long for PKCS#1 padding')
pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
return [0, 2] + pseudo_random + [0] + data
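# Usage sketch (illustrative): pad a short message (given as a list of ints, as the
# docstring above requires) for a 128-byte RSA block.
def _example_pkcs1pad():
    padded = pkcs1pad(list(b'message!'), 128)
    # the block always starts with [0, 2] and ends with a 0 separator plus the message
    assert len(padded) == 128 and padded[:2] == [0, 2] and padded[-9:] == [0] + list(b'message!')
    return padded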
def encode_base_n(num, n, table=None):
FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
if not table:
table = FULL_TABLE[:n]
if n > len(table):
raise ValueError('base %d exceeds table length %d' % (n, len(table)))
if num == 0:
return table[0]
ret = ''
while num:
ret = table[num % n] + ret
num = num // n
return ret
def decode_packed_codes(code):
mobj = re.search(PACKED_CODES_RE, code)
obfuscated_code, base, count, symbols = mobj.groups()
base = int(base)
count = int(count)
symbols = symbols.split('|')
symbol_table = {}
while count:
count -= 1
base_n_count = encode_base_n(count, base)
symbol_table[base_n_count] = symbols[count] or base_n_count
return re.sub(
r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
obfuscated_code)
def caesar(s, alphabet, shift):
if shift == 0:
return s
l = len(alphabet)
return ''.join(
alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
for c in s)
def rot47(s):
return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)
def parse_m3u8_attributes(attrib):
info = {}
for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
if val.startswith('"'):
val = val[1:-1]
info[key] = val
return info
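# Usage sketch (illustrative): parse the attribute list of a hypothetical
# EXT-X-STREAM-INF line; quoted values may contain commas.
def _example_parse_m3u8_attributes():
    return parse_m3u8_attributes('BANDWIDTH=1280000,CODECS="avc1.64001f,mp4a.40.2",RESOLUTION=1280x720')
    # -> {'BANDWIDTH': '1280000', 'CODECS': 'avc1.64001f,mp4a.40.2', 'RESOLUTION': '1280x720'}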
def urshift(val, n):
return val >> n if val >= 0 else (val + 0x100000000) >> n
# Based on png2str() written by @gdkchan and improved by @yokrysty
# Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
def decode_png(png_data):
# Reference: https://www.w3.org/TR/PNG/
header = png_data[8:]
if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
raise IOError('Not a valid PNG file.')
int_map = {1: '>B', 2: '>H', 4: '>I'}
unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]
chunks = []
while header:
length = unpack_integer(header[:4])
header = header[4:]
chunk_type = header[:4]
header = header[4:]
chunk_data = header[:length]
header = header[length:]
header = header[4:] # Skip CRC
chunks.append({
'type': chunk_type,
'length': length,
'data': chunk_data
})
ihdr = chunks[0]['data']
width = unpack_integer(ihdr[:4])
height = unpack_integer(ihdr[4:8])
idat = b''
for chunk in chunks:
if chunk['type'] == b'IDAT':
idat += chunk['data']
if not idat:
raise IOError('Unable to read PNG data.')
decompressed_data = bytearray(zlib.decompress(idat))
stride = width * 3
pixels = []
def _get_pixel(idx):
x = idx % stride
y = idx // stride
return pixels[y][x]
for y in range(height):
basePos = y * (1 + stride)
filter_type = decompressed_data[basePos]
current_row = []
pixels.append(current_row)
for x in range(stride):
color = decompressed_data[1 + basePos + x]
basex = y * stride + x
left = 0
up = 0
if x > 2:
left = _get_pixel(basex - 3)
if y > 0:
up = _get_pixel(basex - stride)
if filter_type == 1: # Sub
color = (color + left) & 0xff
elif filter_type == 2: # Up
color = (color + up) & 0xff
elif filter_type == 3: # Average
color = (color + ((left + up) >> 1)) & 0xff
elif filter_type == 4: # Paeth
a = left
b = up
c = 0
if x > 2 and y > 0:
c = _get_pixel(basex - stride - 3)
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
color = (color + a) & 0xff
elif pb <= pc:
color = (color + b) & 0xff
else:
color = (color + c) & 0xff
current_row.append(color)
return width, height, pixels
def write_xattr(path, key, value):
# This mess below finds the best xattr tool for the job
try:
# try the pyxattr module...
import xattr
if hasattr(xattr, 'set'): # pyxattr
# Unicode arguments are not supported in python-pyxattr until
# version 0.5.0
# See https://github.com/ytdl-org/youtube-dl/issues/5498
pyxattr_required_version = '0.5.0'
if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
# TODO: fallback to CLI tools
raise XAttrUnavailableError(
'python-pyxattr is detected but is too old. '
'yt-dlp requires %s or above while your version is %s. '
'Falling back to other xattr implementations' % (
pyxattr_required_version, xattr.__version__))
setxattr = xattr.set
else: # xattr
setxattr = xattr.setxattr
try:
setxattr(path, key, value)
except EnvironmentError as e:
raise XAttrMetadataError(e.errno, e.strerror)
except ImportError:
if compat_os_name == 'nt':
# Write xattrs to NTFS Alternate Data Streams:
# http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
assert ':' not in key
assert os.path.exists(path)
ads_fn = path + ':' + key
try:
with open(ads_fn, 'wb') as f:
f.write(value)
except EnvironmentError as e:
raise XAttrMetadataError(e.errno, e.strerror)
else:
user_has_setfattr = check_executable('setfattr', ['--version'])
user_has_xattr = check_executable('xattr', ['-h'])
if user_has_setfattr or user_has_xattr:
value = value.decode('utf-8')
if user_has_setfattr:
executable = 'setfattr'
opts = ['-n', key, '-v', value]
elif user_has_xattr:
executable = 'xattr'
opts = ['-w', key, value]
cmd = ([encodeFilename(executable, True)]
+ [encodeArgument(o) for o in opts]
+ [encodeFilename(path, True)])
try:
p = Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
except EnvironmentError as e:
raise XAttrMetadataError(e.errno, e.strerror)
stdout, stderr = p.communicate_or_kill()
stderr = stderr.decode('utf-8', 'replace')
if p.returncode != 0:
raise XAttrMetadataError(p.returncode, stderr)
else:
                # On Unix, but we cannot find pyxattr, setfattr, or xattr.
if sys.platform.startswith('linux'):
raise XAttrUnavailableError(
"Couldn't find a tool to set the xattrs. "
"Install either the python 'pyxattr' or 'xattr' "
"modules, or the GNU 'attr' package "
"(which contains the 'setfattr' tool).")
else:
raise XAttrUnavailableError(
"Couldn't find a tool to set the xattrs. "
"Install either the python 'xattr' module, "
"or the 'xattr' binary.")
def random_birthday(year_field, month_field, day_field):
start_date = datetime.date(1950, 1, 1)
end_date = datetime.date(1995, 12, 31)
offset = random.randint(0, (end_date - start_date).days)
random_date = start_date + datetime.timedelta(offset)
return {
year_field: str(random_date.year),
month_field: str(random_date.month),
day_field: str(random_date.day),
}
# Templates for internet shortcut files, which are plain text files.
DOT_URL_LINK_TEMPLATE = '''
[InternetShortcut]
URL=%(url)s
'''.lstrip()
DOT_WEBLOC_LINK_TEMPLATE = '''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
\t<key>URL</key>
\t<string>%(url)s</string>
</dict>
</plist>
'''.lstrip()
DOT_DESKTOP_LINK_TEMPLATE = '''
[Desktop Entry]
Encoding=UTF-8
Name=%(filename)s
Type=Link
URL=%(url)s
Icon=text-html
'''.lstrip()
LINK_TEMPLATES = {
'url': DOT_URL_LINK_TEMPLATE,
'desktop': DOT_DESKTOP_LINK_TEMPLATE,
'webloc': DOT_WEBLOC_LINK_TEMPLATE,
}
def iri_to_uri(iri):
"""
Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).
The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact.
"""
iri_parts = compat_urllib_parse_urlparse(iri)
if '[' in iri_parts.netloc:
raise ValueError('IPv6 URIs are not, yet, supported.')
# Querying `.netloc`, when there's only one bracket, also raises a ValueError.
# The `safe` argument values, that the following code uses, contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.
net_location = ''
if iri_parts.username:
net_location += compat_urllib_parse_quote(iri_parts.username, safe=r"!$%&'()*+,~")
if iri_parts.password is not None:
net_location += ':' + compat_urllib_parse_quote(iri_parts.password, safe=r"!$%&'()*+,~")
net_location += '@'
net_location += iri_parts.hostname.encode('idna').decode('utf-8') # Punycode for Unicode hostnames.
# The 'idna' encoding produces ASCII text.
if iri_parts.port is not None and iri_parts.port != 80:
net_location += ':' + str(iri_parts.port)
return compat_urllib_parse_urlunparse(
(iri_parts.scheme,
net_location,
compat_urllib_parse_quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),
# Unsure about the `safe` argument, since this is a legacy way of handling parameters.
compat_urllib_parse_quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),
# Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
compat_urllib_parse_quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),
compat_urllib_parse_quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))
# Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
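# Usage sketch (illustrative): percent-encode the non-ASCII parts of a hypothetical
# IRI; the hostname here is already ASCII, so the IDNA step leaves it unchanged.
def _example_iri_to_uri():
    return iri_to_uri('https://example.com/déjà-vu?q=schön')
    # -> 'https://example.com/d%C3%A9j%C3%A0-vu?q=sch%C3%B6n'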
def to_high_limit_path(path):
if sys.platform in ['win32', 'cygwin']:
# Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited.
return r'\\?\ '.rstrip() + os.path.abspath(path)
return path
def format_field(obj, field=None, template='%s', ignore=(None, ''), default='', func=None):
if field is None:
val = obj if obj is not None else default
else:
val = obj.get(field, default)
if func and val not in ignore:
val = func(val)
return template % val if val not in ignore else default
def clean_podcast_url(url):
return re.sub(r'''(?x)
(?:
(?:
chtbl\.com/track|
media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
play\.podtrac\.com
)/[^/]+|
(?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
flex\.acast\.com|
pd(?:
cn\.co| # https://podcorn.com/analytics-prefix/
st\.fm # https://podsights.com/docs/
)/e
)/''', '', url)
_HEX_TABLE = '0123456789abcdef'
def random_uuidv4():
return re.sub(r'[xy]', lambda x: _HEX_TABLE[random.randint(0, 15)], 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')
def make_dir(path, to_screen=None):
try:
dn = os.path.dirname(path)
if dn and not os.path.exists(dn):
os.makedirs(dn)
return True
except (OSError, IOError) as err:
        if callable(to_screen):
to_screen('unable to create directory ' + error_to_compat_str(err))
return False
def get_executable_path():
from zipimport import zipimporter
if hasattr(sys, 'frozen'): # Running from PyInstaller
path = os.path.dirname(sys.executable)
elif isinstance(globals().get('__loader__'), zipimporter): # Running from ZIP
path = os.path.join(os.path.dirname(__file__), '../..')
else:
path = os.path.join(os.path.dirname(__file__), '..')
return os.path.abspath(path)
def load_plugins(name, suffix, namespace):
classes = {}
try:
plugins_spec = importlib.util.spec_from_file_location(
name, os.path.join(get_executable_path(), 'ytdlp_plugins', name, '__init__.py'))
plugins = importlib.util.module_from_spec(plugins_spec)
sys.modules[plugins_spec.name] = plugins
plugins_spec.loader.exec_module(plugins)
for name in dir(plugins):
if name in namespace:
continue
if not name.endswith(suffix):
continue
klass = getattr(plugins, name)
classes[name] = namespace[name] = klass
except FileNotFoundError:
pass
return classes
def traverse_obj(
obj, *path_list, default=None, expected_type=None, get_all=True,
casesense=True, is_user_input=False, traverse_string=False):
''' Traverse nested list/dict/tuple
@param path_list A list of paths which are checked one by one.
Each path is a list of keys where each key is a string,
a function, a tuple of strings or "...".
                            When a function is given, it takes the key as argument and
returns whether the key matches or not. When a tuple is given,
all the keys given in the tuple are traversed, and
"..." traverses all the keys in the object
@param default Default value to return
@param expected_type Only accept final value of this type (Can also be any callable)
@param get_all Return all the values obtained from a path or only the first one
@param casesense Whether to consider dictionary keys as case sensitive
@param is_user_input Whether the keys are generated from user input. If True,
strings are converted to int/slice if necessary
@param traverse_string Whether to traverse inside strings. If True, any
non-compatible object will also be converted into a string
# TODO: Write tests
'''
if not casesense:
_lower = lambda k: (k.lower() if isinstance(k, str) else k)
path_list = (map(_lower, variadic(path)) for path in path_list)
def _traverse_obj(obj, path, _current_depth=0):
nonlocal depth
path = tuple(variadic(path))
for i, key in enumerate(path):
if obj is None:
return None
if isinstance(key, (list, tuple)):
obj = [_traverse_obj(obj, sub_key, _current_depth) for sub_key in key]
key = ...
if key is ...:
obj = (obj.values() if isinstance(obj, dict)
else obj if isinstance(obj, (list, tuple, LazyList))
else str(obj) if traverse_string else [])
_current_depth += 1
depth = max(depth, _current_depth)
return [_traverse_obj(inner_obj, path[i + 1:], _current_depth) for inner_obj in obj]
elif callable(key):
if isinstance(obj, (list, tuple, LazyList)):
obj = enumerate(obj)
elif isinstance(obj, dict):
obj = obj.items()
else:
if not traverse_string:
return None
obj = str(obj)
_current_depth += 1
depth = max(depth, _current_depth)
return [_traverse_obj(v, path[i + 1:], _current_depth) for k, v in obj if key(k)]
elif isinstance(obj, dict) and not (is_user_input and key == ':'):
obj = (obj.get(key) if casesense or (key in obj)
else next((v for k, v in obj.items() if _lower(k) == key), None))
else:
if is_user_input:
key = (int_or_none(key) if ':' not in key
else slice(*map(int_or_none, key.split(':'))))
if key == slice(None):
return _traverse_obj(obj, (..., *path[i + 1:]), _current_depth)
if not isinstance(key, (int, slice)):
return None
if not isinstance(obj, (list, tuple, LazyList)):
if not traverse_string:
return None
obj = str(obj)
try:
obj = obj[key]
except IndexError:
return None
return obj
if isinstance(expected_type, type):
type_test = lambda val: val if isinstance(val, expected_type) else None
elif expected_type is not None:
type_test = expected_type
else:
type_test = lambda val: val
for path in path_list:
depth = 0
val = _traverse_obj(obj, path)
if val is not None:
if depth:
for _ in range(depth - 1):
val = itertools.chain.from_iterable(v for v in val if v is not None)
val = [v for v in map(type_test, val) if v is not None]
if val:
return val if get_all else val[0]
else:
val = type_test(val)
if val is not None:
return val
return default
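# Usage sketch (illustrative): extract nested values from a hypothetical metadata
# dict, both by an exact path and with "..." to branch over every list element.
def _example_traverse_obj():
    meta = {'items': [{'id': 'a', 'fmt': {'url': 'http://x/1'}},
                      {'id': 'b', 'fmt': {}}]}
    first_url = traverse_obj(meta, ('items', 0, 'fmt', 'url'))
    all_ids = traverse_obj(meta, ('items', ..., 'id'))
    return first_url, all_ids  # -> ('http://x/1', ['a', 'b'])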
# Deprecated
def traverse_dict(dictn, keys, casesense=True):
write_string('DeprecationWarning: yt_dlp.utils.traverse_dict is deprecated '
'and may be removed in a future version. Use yt_dlp.utils.traverse_obj instead')
return traverse_obj(dictn, keys, casesense=casesense, is_user_input=True, traverse_string=True)
def variadic(x, allowed_types=(str, bytes)):
return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,)
# create a JSON Web Signature (jws) with HS256 algorithm
# the resulting format is in JWS Compact Serialization
# implemented following JWT https://www.rfc-editor.org/rfc/rfc7519.html
# implemented following JWS https://www.rfc-editor.org/rfc/rfc7515.html
def jwt_encode_hs256(payload_data, key, headers={}):
header_data = {
'alg': 'HS256',
'typ': 'JWT',
}
if headers:
header_data.update(headers)
header_b64 = base64.b64encode(json.dumps(header_data).encode('utf-8'))
payload_b64 = base64.b64encode(json.dumps(payload_data).encode('utf-8'))
h = hmac.new(key.encode('utf-8'), header_b64 + b'.' + payload_b64, hashlib.sha256)
signature_b64 = base64.b64encode(h.digest())
token = header_b64 + b'.' + payload_b64 + b'.' + signature_b64
return token
# Can be extended in the future to verify the signature, parse the header, and return the algorithm used if it's not HS256
def jwt_decode_hs256(jwt):
header_b64, payload_b64, signature_b64 = jwt.split('.')
payload_data = json.loads(base64.urlsafe_b64decode(payload_b64))
return payload_data
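# Usage sketch (illustrative): create a compact JWS token and read its payload
# back. The key and claims are hypothetical; note that jwt_decode_hs256 does not
# verify the signature.
def _example_jwt_roundtrip():
    token = jwt_encode_hs256({'sub': 'user42', 'exp': 0}, 'secret-key')
    return jwt_decode_hs256(token.decode('utf-8'))  # -> {'sub': 'user42', 'exp': 0}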
def supports_terminal_sequences(stream):
if compat_os_name == 'nt':
if get_windows_version() < (10, 0, 10586):
return False
elif not os.getenv('TERM'):
return False
try:
return stream.isatty()
except BaseException:
return False
_terminal_sequences_re = re.compile('\033\\[[^m]+m')
def remove_terminal_sequences(string):
return _terminal_sequences_re.sub('', string)
def number_of_digits(number):
return len('%d' % number)
def join_nonempty(*values, delim='-', from_dict=None):
if from_dict is not None:
values = map(from_dict.get, values)
return delim.join(map(str, filter(None, values)))
|
py | b40e2808fdf8e702cc60f7203c22815466dfbb99 | """
__init__ file for dataset scripts
"""
from .resize import resize_image
|
py | b40e29006e933ddfd5de8a7b6ac4849509a81ca4 | from .calibre_access import (print_record, calibre_downloads, calibre_searches,
all_records, download_coro, search_coro, download_database,
locate_logs, get_database)
__all__ = ('print_record', 'calibre_downloads', 'calibre_searches', 'all_records', 'download_coro',
           'search_coro', 'download_database', 'locate_logs', 'get_database')
|
py | b40e2a2da048b213d193c02e5d910f5065ce8737 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import os.path
import platform
import sys
import subprocess
from functools import partial
from typing import Optional, Set
try:
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
except ImportError:
# needed for py3+qt4
# Ref:
# http://pyqt.sourceforge.net/Docs/PyQt4/incompatible_apis.html
# http://stackoverflow.com/questions/21217399/pyqt4-qtcore-qvariant-object-instead-of-a-string
if sys.version_info.major >= 3:
import sip
sip.setapi('QVariant', 2)
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from libs.resources import *
from libs.constants import *
from libs.utils import *
from libs.settings import Settings
from libs.shape import Shape, DEFAULT_LINE_COLOR, DEFAULT_FILL_COLOR
from libs.stringBundle import StringBundle
from libs.canvas import Canvas
from libs.zoomWidget import ZoomWidget
from libs.labelDialog import LabelDialog
from libs.colorDialog import ColorDialog
from libs.labelFile import LabelFile, LabelFileError
from libs.toolBar import ToolBar
from libs.pascal_voc_io import PascalVocReader
from libs.pascal_voc_io import XML_EXT
from libs.yolo_io import YoloReader
from libs.yolo_io import TXT_EXT
from libs.ustr import ustr
from libs.hashableQListWidgetItem import HashableQListWidgetItem
__appname__ = 'labelImg'
class WindowMixin(object):
def menu(self, title, actions=None):
menu = self.menuBar().addMenu(title)
if actions:
addActions(menu, actions)
return menu
def toolbar(self, title, actions=None):
toolbar = ToolBar(title)
toolbar.setObjectName(u'%sToolBar' % title)
# toolbar.setOrientation(Qt.Vertical)
toolbar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
if actions:
addActions(toolbar, actions)
self.addToolBar(Qt.LeftToolBarArea, toolbar)
return toolbar
class UtilsFuncMixin(object):
r"""这个类专门用来放一些和类方法不是强相关,比较通用,没有什么成员依赖的函数
"""
preShortCutModeSetting: str = ''
    def str2dict(self, input: str) -> Optional[dict]:
        r"""Convert a string such as ``a=xy,b=cat`` into a dict with ``a``/``b`` as (upper-cased)
        keys and ``xy``/``cat`` as values. Returns None when a key is not a single letter or is
        one of ``awsdAWSD``; used mainly by setShortCutMode_slot.
        """
result={}
input=input.replace(',',',')
for item in input.split(','):
k,*_,v=item.split('=')
if k.isalpha():
if len(k) != 1:
return None
if k in ('a','s','d','w','A','S','D','W'):
return None
result[k.upper()]=v
else:
return None
return result
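    # Example sketch: str2dict('q=person,e=car') -> {'Q': 'person', 'E': 'car'}; str2dict('w=dog') -> None (reserved key).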
class MainWindow(QMainWindow, WindowMixin,UtilsFuncMixin):
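    # Zoom modes: fit the whole image to the window, fit the image width, or use a manual zoom level.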
FIT_WINDOW, FIT_WIDTH, MANUAL_ZOOM = list(range(3))
def __init__(self, defaultFilename=None, defaultPrefdefClassFile=None, defaultSaveDir=None):
super(MainWindow, self).__init__()
self.setWindowTitle(__appname__)
        self.shortCutModeKeyMap={}  # maps a Qt.Key to a label name; marks which shortcut keys are usable in shortCutMode
        # Flag for whether a txt file list was opened
        self.isTxt = False
        # Single-class display filter (None shows everything)
        self.only_show = None
# Load setting in the main thread
self.txtPath: Optional[str] = None
self.saveTxtName = '1.txt'
self.imgPath: Optional[str] = None
self.isDelete = False
self.settings = Settings()
self.settings.load()
settings = self.settings
# Load string bundle for i18n
self.stringBundle = StringBundle.getBundle()
getStr = lambda strId: self.stringBundle.getString(strId)
# Save as Pascal voc xml
self.defaultSaveDir = defaultSaveDir
self.usingPascalVocFormat = True
self.usingYoloFormat = False
# For loading all image under a directory
self.mImgList = []
self.dirname = None
        self.labelHist = []  # records every label name seen so far
self.lastOpenDir = None
# Whether we need to save or not.
self.dirty = False
self._noSelectionSlot = False
self._beginner = True
self.screencastViewer = self.getAvailableScreencastViewer()
self.screencast = "https://youtu.be/p0nR2YsCY_U"
# Load predefined classes to the list
self.loadPredefinedClasses(defaultPrefdefClassFile)
# Main widgets and related state.
self.labelDialog = LabelDialog(parent=self, listItem=self.labelHist)
self.itemsToShapes = {}
self.shapesToItems = {}
self.prevLabelText = ''
listLayout = QVBoxLayout()
listLayout.setContentsMargins(0, 0, 0, 0)
# Create a widget for using default label
self.useDefaultLabelCheckbox = QCheckBox(getStr('useDefaultLabel'))
self.useDefaultLabelCheckbox.setChecked(False)
self.defaultLabelTextLine = QLineEdit()
useDefaultLabelQHBoxLayout = QHBoxLayout()
useDefaultLabelQHBoxLayout.addWidget(self.useDefaultLabelCheckbox)
useDefaultLabelQHBoxLayout.addWidget(self.defaultLabelTextLine)
useDefaultLabelContainer = QWidget()
useDefaultLabelContainer.setLayout(useDefaultLabelQHBoxLayout)
# Create a widget for edit and diffc button
self.diffcButton = QCheckBox(getStr('useDifficult'))
self.diffcButton.setChecked(False)
self.diffcButton.stateChanged.connect(self.btnstate)
self.editButton = QToolButton()
self.editButton.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
# Add some of widgets to listLayout
listLayout.addWidget(self.editButton)
listLayout.addWidget(self.diffcButton)
listLayout.addWidget(useDefaultLabelContainer)
# Create and add a widget for showing current label items
self.labelList = QListWidget()
labelListContainer = QWidget()
labelListContainer.setLayout(listLayout)
self.labelList.itemActivated.connect(self.labelSelectionChanged)
self.labelList.itemSelectionChanged.connect(self.labelSelectionChanged)
self.labelList.itemDoubleClicked.connect(self.editLabel)
# Connect to itemChanged to detect checkbox changes.
self.labelList.itemChanged.connect(self.labelItemChanged)
listLayout.addWidget(self.labelList)
self.dock = QDockWidget(getStr('boxLabelText'), self)
self.dock.setObjectName(getStr('labels'))
self.dock.setWidget(labelListContainer)
self.fileListWidget = QListWidget()
self.fileListWidget.itemDoubleClicked.connect(self.fileitemDoubleClicked)
filelistLayout = QVBoxLayout()
filelistLayout.setContentsMargins(0, 0, 0, 0)
filelistLayout.addWidget(self.fileListWidget)
fileListContainer = QWidget()
fileListContainer.setLayout(filelistLayout)
self.filedock = QDockWidget(getStr('fileList'), self)
self.filedock.setObjectName(getStr('files'))
self.filedock.setWidget(fileListContainer)
self.zoomWidget = ZoomWidget()
self.colorDialog = ColorDialog(parent=self)
self.canvas = Canvas(parent=self)
self.canvas.zoomRequest.connect(self.zoomRequest)
self.canvas.setDrawingShapeToSquare(settings.get(SETTING_DRAW_SQUARE, False))
scroll = QScrollArea()
scroll.setWidget(self.canvas)
scroll.setWidgetResizable(True)
self.scrollBars = {
Qt.Vertical: scroll.verticalScrollBar(),
Qt.Horizontal: scroll.horizontalScrollBar()
}
self.scrollArea = scroll
self.canvas.scrollRequest.connect(self.scrollRequest)
self.canvas.newShape.connect(self.newShape)
self.canvas.shapeMoved.connect(self.setDirty)
self.canvas.selectionChanged.connect(self.shapeSelectionChanged)
self.canvas.drawingPolygon.connect(self.toggleDrawingSensitive)
self.setCentralWidget(scroll)
self.addDockWidget(Qt.RightDockWidgetArea, self.dock)
self.addDockWidget(Qt.RightDockWidgetArea, self.filedock)
self.filedock.setFeatures(QDockWidget.DockWidgetFloatable)
self.dockFeatures = QDockWidget.DockWidgetClosable | QDockWidget.DockWidgetFloatable
self.dock.setFeatures(self.dock.features() ^ self.dockFeatures)
        self._cahed_xmlTree = None  # cache the parsed XML tree; only reading/writing the objects used to drop the file's other information
# Actions
action = partial(newAction, self)
quit = action(getStr('quit'), self.close,
'Ctrl+Q', 'quit', getStr('quitApp'))
open = action(getStr('openFile'), self.openFile,
'Ctrl+O', 'open', getStr('openFileDetail'))
opentxt = action(getStr('openTxt'), self.openTxt,
'Ctrl+T', 'open', getStr('openTxt'))
opendir = action(getStr('openDir'), self.openDirDialog,
'Ctrl+u', 'open', getStr('openDir'))
changeSavedir = action(getStr('changeSaveDir'), self.changeSavedirDialog,
'Ctrl+r', 'open', getStr('changeSavedAnnotationDir'))
openAnnotation = action(getStr('openAnnotation'), self.openAnnotationDialog,
'Ctrl+Shift+O', 'open', getStr('openAnnotationDetail'))
openNextImg = action(getStr('nextImg'), self.openNextImg,
'd', 'next', getStr('nextImgDetail'))
openPrevImg = action(getStr('prevImg'), self.openPrevImg,
'a', 'prev', getStr('prevImgDetail'))
verify = action(getStr('verifyImg'), self.verifyImg,
'space', 'verify', getStr('verifyImgDetail'))
save = action(getStr('save'), self.saveFile,
'Ctrl+S', 'save', getStr('saveDetail'), enabled=False)
save_format = action('&PascalVOC', self.change_format,
'Ctrl+', 'format_voc', getStr('changeSaveFormat'), enabled=True)
saveAs = action(getStr('saveAs'), self.saveFileAs,
'Ctrl+Shift+S', 'save-as', getStr('saveAsDetail'), enabled=False)
close = action(getStr('closeCur'), self.closeFile, 'Ctrl+W', 'close', getStr('closeCurDetail'))
resetAll = action(getStr('resetAll'), self.resetAll, None, 'resetall', getStr('resetAllDetail'))
color1 = action(getStr('boxLineColor'), self.chooseColor1,
'Ctrl+L', 'color_line', getStr('boxLineColorDetail'))
createMode = action(getStr('crtBox'), self.setCreateMode,
'w', 'new', getStr('crtBoxDetail'), enabled=False)
editMode = action('&Edit\nRectBox', self.setEditMode,
'Ctrl+J', 'edit', u'Move and edit Boxs', enabled=False)
create = action(getStr('crtBox'), self.createShape,
'w', 'new', getStr('crtBoxDetail'), enabled=False)
delete = action(getStr('delBox'), self.deleteSelectedShape,
'Delete', 'delete', getStr('delBoxDetail'), enabled=False)
copy = action(getStr('dupBox'), self.copySelectedShape,
'Ctrl+D', 'copy', getStr('dupBoxDetail'),
enabled=False)
advancedMode = action(getStr('advancedMode'), self.toggleAdvancedMode,
'Ctrl+Shift+A', 'expert', getStr('advancedModeDetail'),
checkable=True)
hideAll = action('&Hide\nRectBox', partial(self.togglePolygons, False),
'Ctrl+H', 'hide', getStr('hideAllBoxDetail'),
enabled=False)
showAll = action('&Show\nRectBox', partial(self.togglePolygons, True),
'Ctrl+A', 'hide', getStr('showAllBoxDetail'),
enabled=False)
onlyShow = action('&单一显示模式', self.setOnlyShow)
help = action(getStr('tutorial'), self.showTutorialDialog, None, 'help', getStr('tutorialDetail'))
showInfo = action(getStr('info'), self.showInfoDialog, None, 'help', getStr('info'))
zoom = QWidgetAction(self)
zoom.setDefaultWidget(self.zoomWidget)
self.zoomWidget.setWhatsThis(
u"Zoom in or out of the image. Also accessible with"
" %s and %s from the canvas." % (fmtShortcut("Ctrl+[-+]"),
fmtShortcut("Ctrl+Wheel")))
self.zoomWidget.setEnabled(False)
zoomIn = action(getStr('zoomin'), partial(self.addZoom, 10),
'Ctrl++', 'zoom-in', getStr('zoominDetail'), enabled=False)
zoomOut = action(getStr('zoomout'), partial(self.addZoom, -10),
'Ctrl+-', 'zoom-out', getStr('zoomoutDetail'), enabled=False)
zoomOrg = action(getStr('originalsize'), partial(self.setZoom, 100),
'Ctrl+=', 'zoom', getStr('originalsizeDetail'), enabled=False)
fitWindow = action(getStr('fitWin'), self.setFitWindow,
'Ctrl+F', 'fit-window', getStr('fitWinDetail'),
checkable=True, enabled=False)
fitWidth = action(getStr('fitWidth'), self.setFitWidth,
'Ctrl+Shift+F', 'fit-width', getStr('fitWidthDetail'),
checkable=True, enabled=False)
# Group zoom controls into a list for easier toggling.
zoomActions = (self.zoomWidget, zoomIn, zoomOut,
zoomOrg, fitWindow, fitWidth)
self.zoomMode = self.MANUAL_ZOOM
self.scalers = {
self.FIT_WINDOW: self.scaleFitWindow,
self.FIT_WIDTH: self.scaleFitWidth,
# Set to one to scale to 100% when loading files.
self.MANUAL_ZOOM: lambda: 1,
}
edit = action(getStr('editLabel'), self.editLabel,
'Ctrl+E', 'edit', getStr('editLabelDetail'),
enabled=False)
self.editButton.setDefaultAction(edit)
shapeLineColor = action(getStr('shapeLineColor'), self.chshapeLineColor,
icon='color_line', tip=getStr('shapeLineColorDetail'),
enabled=False)
shapeFillColor = action(getStr('shapeFillColor'), self.chshapeFillColor,
icon='color', tip=getStr('shapeFillColorDetail'),
enabled=False)
labels = self.dock.toggleViewAction()
labels.setText(getStr('showHide'))
labels.setShortcut('Ctrl+Shift+L')
# Label list context menu.
labelMenu = QMenu()
addActions(labelMenu, (edit, delete))
self.labelList.setContextMenuPolicy(Qt.CustomContextMenu)
self.labelList.customContextMenuRequested.connect(
self.popLabelListMenu)
# Draw squares/rectangles
self.drawSquaresOption = QAction('Draw Squares', self)
self.drawSquaresOption.setShortcut('Ctrl+Shift+R')
self.drawSquaresOption.setCheckable(True)
self.drawSquaresOption.setChecked(settings.get(SETTING_DRAW_SQUARE, False))
self.drawSquaresOption.triggered.connect(self.toogleDrawSquare)
# Store actions for further handling.
self.actions = struct(save=save, save_format=save_format, saveAs=saveAs, open=open, close=close,
resetAll=resetAll,
lineColor=color1, create=create, delete=delete, edit=edit, copy=copy,
createMode=createMode, editMode=editMode, advancedMode=advancedMode,
shapeLineColor=shapeLineColor, shapeFillColor=shapeFillColor,
zoom=zoom, zoomIn=zoomIn, zoomOut=zoomOut, zoomOrg=zoomOrg,
fitWindow=fitWindow, fitWidth=fitWidth,
zoomActions=zoomActions,
fileMenuActions=(
open, opentxt, opendir, save, saveAs, close, resetAll, quit),
beginner=(), advanced=(),
editMenu=(edit, copy, delete,
None, color1, self.drawSquaresOption),
beginnerContext=(create, edit, copy, delete),
advancedContext=(createMode, editMode, edit, copy, onlyShow,
delete, shapeLineColor, shapeFillColor),
onLoadActive=(
close, create, createMode, editMode),
onShapesPresent=(saveAs, hideAll, showAll))
self.menus = struct(
file=self.menu('&File'),
edit=self.menu('&Edit'),
view=self.menu('&View'),
help=self.menu('&Help'),
recentFiles=QMenu('Open &Recent'),
labelList=labelMenu)
# Auto saving : Enable auto saving if pressing next
self.autoSaving = QAction(getStr('autoSaveMode'), self)
self.autoSaving.setCheckable(True)
self.autoSaving.setChecked(settings.get(SETTING_AUTO_SAVE, False))
        # Force auto saving: skip the check on whether any object was modified and always save.
        # With that check enabled, newly annotated images that contain no objects are skipped and no xml is produced.
self.forceAutoSaving = QAction('&强制自动保存', self)
self.forceAutoSaving.setCheckable(True)
self.forceAutoSaving.setChecked(settings.get(SETTING_AUTO_SAVE_FORCE,False))
# Sync single class mode from PR#106
self.singleClassMode = QAction(getStr('singleClsMode'), self)
self.singleClassMode.setShortcut("Ctrl+Shift+S")
self.singleClassMode.setCheckable(True)
self.singleClassMode.setChecked(settings.get(SETTING_SINGLE_CLASS, False))
self.lastLabel : Optional[str] = None
        # Support binding shortcut keys to specific classes
self.shortCutMode=QAction('shortCutMode',self)
self.shortCutMode.setCheckable(True)
self.shortCutMode.setChecked(settings.get(SETTING_SHORT_CUT_MODE, False))
self.shortCutMode.triggered.connect(self.setShortCutMode_slot)
# Add option to enable/disable labels being displayed at the top of bounding boxes
self.displayLabelOption = QAction(getStr('displayLabel'), self)
self.displayLabelOption.setShortcut("Ctrl+Shift+P")
self.displayLabelOption.setCheckable(True)
self.displayLabelOption.setChecked(settings.get(SETTING_PAINT_LABEL, False))
self.displayLabelOption.triggered.connect(self.togglePaintLabelsOption)
addActions(self.menus.file,
(open, opentxt, opendir, changeSavedir, openAnnotation, self.menus.recentFiles, save, save_format,
saveAs, close, resetAll, quit))
addActions(self.menus.help, (help, showInfo))
addActions(self.menus.view, (
self.autoSaving,
self.forceAutoSaving,
onlyShow,
self.singleClassMode,
self.shortCutMode,
self.displayLabelOption,
labels, advancedMode, None,
hideAll, showAll, None,
zoomIn, zoomOut, zoomOrg, None,
fitWindow, fitWidth))
self.menus.file.aboutToShow.connect(self.updateFileMenu)
# Custom context menu for the canvas widget:
addActions(self.canvas.menus[0], self.actions.beginnerContext)
addActions(self.canvas.menus[1], (
action('&Copy here', self.copyShape),
action('&Move here', self.moveShape)))
self.tools = self.toolbar('Tools')
self.actions.beginner = (
open, opentxt, opendir, changeSavedir, openNextImg, openPrevImg, verify, save, save_format, None, create,
copy, delete, None,
zoomIn, zoom, zoomOut, fitWindow, fitWidth)
self.actions.advanced = (
open, opentxt, opendir, changeSavedir, openNextImg, openPrevImg, save, save_format, None,
createMode, editMode, None,
hideAll, showAll)
self.statusBar().showMessage('%s started.' % __appname__)
self.statusBar().show()
# Application state.
self.image = QImage()
self.filePath = ustr(defaultFilename)
self.recentFiles = []
self.maxRecent = 7
self.lineColor = None
self.fillColor = None
self.zoom_level = 100
self.fit_window = False
# Add Chris
self.difficult = False
        ## Fix the compatibility issue between qt4 and qt5. Convert the QStringList to a python list
if settings.get(SETTING_RECENT_FILES):
if have_qstring():
recentFileQStringList = settings.get(SETTING_RECENT_FILES)
self.recentFiles = [ustr(i) for i in recentFileQStringList]
else:
self.recentFiles = recentFileQStringList = settings.get(SETTING_RECENT_FILES)
size = settings.get(SETTING_WIN_SIZE, QSize(600, 500))
position = QPoint(0, 0)
saved_position = settings.get(SETTING_WIN_POSE, position)
# Fix the multiple monitors issue
for i in range(QApplication.desktop().screenCount()):
if QApplication.desktop().availableGeometry(i).contains(saved_position):
position = saved_position
break
self.resize(size)
self.move(position)
saveDir = ustr(settings.get(SETTING_SAVE_DIR, None))
self.lastOpenDir = ustr(settings.get(SETTING_LAST_OPEN_DIR, None))
if self.defaultSaveDir is None and saveDir is not None and os.path.exists(saveDir):
self.defaultSaveDir = saveDir
self.statusBar().showMessage('%s started. Annotation will be saved to %s' %
(__appname__, self.defaultSaveDir))
self.statusBar().show()
self.restoreState(settings.get(SETTING_WIN_STATE, QByteArray()))
Shape.line_color = self.lineColor = QColor(settings.get(SETTING_LINE_COLOR, DEFAULT_LINE_COLOR))
Shape.fill_color = self.fillColor = QColor(settings.get(SETTING_FILL_COLOR, DEFAULT_FILL_COLOR))
self.canvas.setDrawingColor(self.lineColor)
# Add chris
Shape.difficult = self.difficult
def xbool(x):
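            # Settings loaded under Qt4 can come back as QVariant and need an explicit bool conversion.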
if isinstance(x, QVariant):
return x.toBool()
return bool(x)
if xbool(settings.get(SETTING_ADVANCE_MODE, False)):
self.actions.advancedMode.setChecked(True)
self.toggleAdvancedMode()
# Populate the File menu dynamically.
self.updateFileMenu()
# Since loading the file may take some time, make sure it runs in the background.
if self.filePath and os.path.isdir(self.filePath):
self.queueEvent(partial(self.importDirImages, self.filePath or ""))
elif self.filePath:
self.queueEvent(partial(self.loadFile, self.filePath or ""))
# Callbacks:
self.zoomWidget.valueChanged.connect(self.paintCanvas)
self.populateModeActions()
# Display cursor coordinates at the right of status bar
self.labelCoordinates = QLabel('')
self.statusBar().addPermanentWidget(self.labelCoordinates)
        # Open the directory if the default file path is a directory
if self.filePath and os.path.isdir(self.filePath):
self.openDirDialog(dirpath=self.filePath, silent=True)
def saveErrImg(self):
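        # Record the current image path in the numbered txt (self.saveTxtName); with self.isDelete set, remove it instead.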
if not self.imgPath:
print('self.imgPath is empty')
return
if not self.txtPath:
special_txt_path = os.path.join(os.path.dirname(self.filePath),'special_txt')
if not os.path.exists(special_txt_path):
os.makedirs(special_txt_path)
self.txtPath = os.path.join(special_txt_path,os.path.basename(self.filePath))
if self.txtPath is not None:
absTxtPath = os.path.dirname(self.txtPath)
saveTxtPath = os.path.join(absTxtPath, self.saveTxtName)
            # NOTE: txtData may contain None here
txtData : Set[str] = set()
if os.path.exists(saveTxtPath):
r = open(saveTxtPath, 'r')
txtData = set(r.readlines())
r.close()
if self.isDelete:
if self.imgPath in txtData:
txtData.discard(self.imgPath)
self.status('{} 已从 {} 删除!'.format(os.path.basename(self.imgPath),os.path.basename(saveTxtPath)))
else:
self.status('{} 不在 {} 中!'.format(os.path.basename(self.imgPath),os.path.basename(saveTxtPath)))
return
else:
if self.imgPath not in txtData:
txtData.add(self.imgPath)
self.status('{} 已记录到 {}!'.format(os.path.basename(self.imgPath),os.path.basename(saveTxtPath)))
else:
self.status('{} 已在 {} 中!'.format(os.path.basename(self.imgPath),os.path.basename(saveTxtPath)))
return
w = open(saveTxtPath, 'w')
w.writelines(txtData)
w.close()
def keyReleaseEvent(self, event):
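        # Releasing 1-9 records the current image into the matching "<n>.txt", 0 removes it,
        # and any key mapped in shortCutModeKeyMap starts drawing a box with that label.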
self.isDelete = False
        # TODO: this block should be rewritten to be table-driven
        if event.key() == Qt.Key_Control:
            self.canvas.setDrawingShapeToSquare(False)
        # Save images with quality problems into txt files named 1~9
if event.key() == Qt.Key_1:
self.saveTxtName = '1.txt'
self.saveErrImg()
elif event.key() == Qt.Key_2:
self.saveTxtName = '2.txt'
self.saveErrImg()
elif event.key() == Qt.Key_3:
self.saveTxtName = '3.txt'
self.saveErrImg()
elif event.key() == Qt.Key_4:
self.saveTxtName = '4.txt'
self.saveErrImg()
elif event.key() == Qt.Key_5:
self.saveTxtName = '5.txt'
self.saveErrImg()
elif event.key() == Qt.Key_6:
self.saveTxtName = '6.txt'
self.saveErrImg()
elif event.key() == Qt.Key_7:
self.saveTxtName = '7.txt'
self.saveErrImg()
elif event.key() == Qt.Key_8:
self.saveTxtName = '8.txt'
self.saveErrImg()
elif event.key() == Qt.Key_9:
self.saveTxtName = '9.txt'
self.saveErrImg()
elif event.key() == Qt.Key_0:
self.isDelete = True
self.saveErrImg()
else:
if event.key() in self.shortCutModeKeyMap:
                self.lastLabel = self.shortCutModeKeyMap[event.key()]
                self.createShape()
def keyPressEvent(self, event):
if event.key() == Qt.Key_Control:
            # Upstream labelImg switches to square drawing while Ctrl is held, but that behaviour
            # was buggy here, so square drawing is explicitly disabled (False) instead.
self.canvas.setDrawingShapeToSquare(False)
## Support Functions ##
def set_format(self, save_format):
if save_format == FORMAT_PASCALVOC:
self.actions.save_format.setText(FORMAT_PASCALVOC)
self.actions.save_format.setIcon(newIcon("format_voc"))
self.usingPascalVocFormat = True
self.usingYoloFormat = False
LabelFile.suffix = XML_EXT
elif save_format == FORMAT_YOLO:
self.actions.save_format.setText(FORMAT_YOLO)
self.actions.save_format.setIcon(newIcon("format_yolo"))
self.usingPascalVocFormat = False
self.usingYoloFormat = True
LabelFile.suffix = TXT_EXT
def change_format(self):
if self.usingPascalVocFormat:
self.set_format(FORMAT_YOLO)
elif self.usingYoloFormat:
self.set_format(FORMAT_PASCALVOC)
def noShapes(self):
return not self.itemsToShapes
def toggleAdvancedMode(self, value=True):
self._beginner = not value
self.canvas.setEditing(True)
self.populateModeActions()
self.editButton.setVisible(not value)
if value:
self.actions.createMode.setEnabled(True)
self.actions.editMode.setEnabled(False)
self.dock.setFeatures(self.dock.features() | self.dockFeatures)
else:
self.dock.setFeatures(self.dock.features() ^ self.dockFeatures)
def populateModeActions(self):
if self.beginner():
tool, menu = self.actions.beginner, self.actions.beginnerContext
else:
tool, menu = self.actions.advanced, self.actions.advancedContext
self.tools.clear()
addActions(self.tools, tool)
self.canvas.menus[0].clear()
addActions(self.canvas.menus[0], menu)
self.menus.edit.clear()
actions = (self.actions.create,) if self.beginner() \
else (self.actions.createMode, self.actions.editMode)
addActions(self.menus.edit, actions + self.actions.editMenu)
def setBeginner(self):
self.tools.clear()
addActions(self.tools, self.actions.beginner)
def setAdvanced(self):
self.tools.clear()
addActions(self.tools, self.actions.advanced)
def setDirty(self):
self.dirty = True
self.actions.save.setEnabled(True)
def setClean(self):
self.dirty = False
self.actions.save.setEnabled(False)
self.actions.create.setEnabled(True)
def toggleActions(self, value=True):
"""Enable/Disable widgets which depend on an opened image."""
for z in self.actions.zoomActions:
z.setEnabled(value)
for action in self.actions.onLoadActive:
action.setEnabled(value)
def queueEvent(self, function):
QTimer.singleShot(0, function)
def status(self, message, delay=5000):
self.statusBar().showMessage(message, delay)
def resetState(self):
self.itemsToShapes.clear()
self.shapesToItems.clear()
self.labelList.clear()
self.filePath = None
self.imageData = None
self.labelFile = None
self.canvas.resetState()
self.labelCoordinates.clear()
def currentItem(self):
items = self.labelList.selectedItems()
if items:
return items[0]
return None
def addRecentFile(self, filePath):
if filePath in self.recentFiles:
self.recentFiles.remove(filePath)
elif len(self.recentFiles) >= self.maxRecent:
self.recentFiles.pop()
self.recentFiles.insert(0, filePath)
def beginner(self):
return self._beginner
def advanced(self):
return not self.beginner()
def getAvailableScreencastViewer(self):
osName = platform.system()
if osName == 'Windows':
return ['C:\\Program Files\\Internet Explorer\\iexplore.exe']
elif osName == 'Linux':
return ['xdg-open']
elif osName == 'Darwin':
return ['open']
## Callbacks ##
def showTutorialDialog(self):
subprocess.Popen(self.screencastViewer + [self.screencast])
def showInfoDialog(self):
msg = u'Name:{0} \nApp Version:{1} \n{2} '.format(__appname__, 'chiebot', sys.version_info)
QMessageBox.information(self, u'Information', msg)
def createShape(self):
assert self.beginner()
self.canvas.setEditing(False)
self.actions.create.setEnabled(False)
def toggleDrawingSensitive(self, drawing=True):
"""In the middle of drawing, toggling between modes should be disabled."""
self.actions.editMode.setEnabled(not drawing)
if not drawing and self.beginner():
# Cancel creation.
print('Cancel creation.')
self.canvas.setEditing(True)
self.canvas.restoreCursor()
self.actions.create.setEnabled(True)
def toggleDrawMode(self, edit=True):
self.canvas.setEditing(edit)
self.actions.createMode.setEnabled(edit)
self.actions.editMode.setEnabled(not edit)
def setCreateMode(self):
assert self.advanced()
self.toggleDrawMode(False)
def setEditMode(self):
assert self.advanced()
self.toggleDrawMode(True)
self.labelSelectionChanged()
def setOnlyShow(self):
_label, ok = QInputDialog.getText(self, '单一类别显示',
'类别名(多个类别用\';\'号间隔, 空值取消该模式):')
if ok:
if _label == "":
self.only_show = None
else:
self.only_show = _label.split(';')
def updateFileMenu(self):
currFilePath = self.filePath
def exists(filename):
return os.path.exists(filename)
menu = self.menus.recentFiles
menu.clear()
files = [f for f in self.recentFiles if f !=
currFilePath and exists(f)]
for i, f in enumerate(files):
icon = newIcon('labels')
action = QAction(
icon, '&%d %s' % (i + 1, QFileInfo(f).fileName()), self)
action.triggered.connect(partial(self.loadRecent, f))
menu.addAction(action)
def popLabelListMenu(self, point):
self.menus.labelList.exec_(self.labelList.mapToGlobal(point))
def editLabel(self):
if not self.canvas.editing():
return
item = self.currentItem()
if not item:
return
text = self.labelDialog.popUp(item.text())
if text is not None:
item.setText(text)
item.setBackground(generateColorByText(text))
self.setDirty()
# Tzutalin 20160906 : Add file list and dock to move faster
def fileitemDoubleClicked(self, item=None):
currIndex = self.mImgList.index(ustr(item.text()))
if currIndex < len(self.mImgList):
filename = self.mImgList[currIndex]
if filename:
self.loadFile(filename)
# Add chris
def btnstate(self, item=None):
""" Function to handle difficult examples
Update on each object """
if not self.canvas.editing():
return
item = self.currentItem()
if not item: # If not selected Item, take the first one
item = self.labelList.item(self.labelList.count() - 1)
difficult = self.diffcButton.isChecked()
try:
shape = self.itemsToShapes[item]
except:
pass
# Checked and Update
try:
if difficult != shape.difficult:
shape.difficult = difficult
self.setDirty()
else: # User probably changed item visibility
self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked)
except:
pass
# React to canvas signals.
def shapeSelectionChanged(self, selected=False):
if self._noSelectionSlot:
self._noSelectionSlot = False
else:
shape = self.canvas.selectedShape
if shape:
self.shapesToItems[shape].setSelected(True)
else:
self.labelList.clearSelection()
self.actions.delete.setEnabled(selected)
self.actions.copy.setEnabled(selected)
self.actions.edit.setEnabled(selected)
self.actions.shapeLineColor.setEnabled(selected)
self.actions.shapeFillColor.setEnabled(selected)
def addLabel(self, shape):
shape.paintLabel = self.displayLabelOption.isChecked()
item = HashableQListWidgetItem(shape.label)
item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
item.setCheckState(Qt.Checked)
item.setBackground(generateColorByText(shape.label))
self.itemsToShapes[item] = shape
self.shapesToItems[shape] = item
self.labelList.addItem(item)
for action in self.actions.onShapesPresent:
action.setEnabled(True)
def remLabel(self, shape):
if shape is None:
# print('rm empty label')
return
item = self.shapesToItems[shape]
self.labelList.takeItem(self.labelList.row(item))
del self.shapesToItems[shape]
del self.itemsToShapes[item]
def loadLabels(self, shapes):
s = []
for label, points, line_color, fill_color, difficult, imgsize in shapes:
shape = Shape(label=label)
for x, y in points:
# Ensure the labels are within the bounds of the image. If not, fix them.
x, y, snapped = self.canvas.snapPointToCanvas(x, y)
if snapped:
self.setDirty()
shape.addPoint(QPointF(x, y))
shape.difficult = difficult
shape.imgsize = imgsize
shape.close()
s.append(shape)
if line_color:
shape.line_color = QColor(*line_color)
else:
shape.line_color = generateColorByText(label)
if fill_color:
shape.fill_color = QColor(*fill_color)
else:
shape.fill_color = generateColorByText(label)
self.addLabel(shape)
if self.only_show:
self.filter_showBox()
self.canvas.loadShapes(s)
def filter_showBox(self):
        # In single-class display mode, automatically uncheck the bboxes of every other class so they are hidden
for idx in range(len(self.labelList)):
item = self.labelList.item(idx)
shape = self.itemsToShapes[item]
label = item.text()
if label not in self.only_show:
item.setCheckState(Qt.Unchecked)
self.canvas.setShapeVisible(shape, False)
def saveLabels(self, annotationFilePath):
annotationFilePath = ustr(annotationFilePath)
if self.labelFile is None:
self.labelFile = LabelFile()
self.labelFile.verified = self.canvas.verified
def format_shape(s):
return dict(label=s.label,
line_color=s.line_color.getRgb(),
fill_color=s.fill_color.getRgb(),
points=[(p.x(), p.y()) for p in s.points],
# add chris
difficult=s.difficult)
shapes = [format_shape(shape) for shape in self.canvas.shapes]
        # Can add different annotation formats here
try:
if self.usingPascalVocFormat is True:
if annotationFilePath[-4:].lower() != ".xml":
annotationFilePath += XML_EXT
self.labelFile.savePascalVocFormat(annotationFilePath, shapes, self.filePath, self.imageData,
self.lineColor.getRgb(), self.fillColor.getRgb(),
origin_xmlTree=self._cahed_xmlTree)
elif self.usingYoloFormat is True:
if annotationFilePath[-4:].lower() != ".txt":
annotationFilePath += TXT_EXT
self.labelFile.saveYoloFormat(annotationFilePath, shapes, self.filePath, self.imageData, self.labelHist,
self.lineColor.getRgb(), self.fillColor.getRgb())
else:
self.labelFile.save(annotationFilePath, shapes, self.filePath, self.imageData,
self.lineColor.getRgb(), self.fillColor.getRgb())
print('Image:{0} -> Annotation:{1}'.format(self.filePath, annotationFilePath))
return True
except LabelFileError as e:
self.errorMessage(u'Error saving label data', u'<b>%s</b>' % e)
return False
def copySelectedShape(self):
self.addLabel(self.canvas.copySelectedShape())
# fix copy and delete
self.shapeSelectionChanged(True)
def labelSelectionChanged(self):
item = self.currentItem()
if item and self.canvas.editing():
self._noSelectionSlot = True
self.canvas.selectShape(self.itemsToShapes[item])
shape = self.itemsToShapes[item]
# Add Chris
self.diffcButton.setChecked(shape.difficult)
def labelItemChanged(self, item):
shape = self.itemsToShapes[item]
label = item.text()
if label != shape.label:
shape.label = item.text()
shape.line_color = generateColorByText(shape.label)
self.setDirty()
else: # User probably changed item visibility
self.canvas.setShapeVisible(shape, item.checkState() == Qt.Checked)
# Callback functions:
def newShape(self):
"""Pop-up and give focus to the label editor.
position MUST be in global coordinates.
"""
if not self.useDefaultLabelCheckbox.isChecked() or not self.defaultLabelTextLine.text():
if len(self.labelHist) > 0:
self.labelDialog = LabelDialog(
parent=self, listItem=self.labelHist)
if self.shortCutMode.isChecked():
if self.lastLabel in self.shortCutModeKeyMap.values():
text=self.lastLabel
self.lastLabel=None
else:
text = self.labelDialog.popUp(text=self.prevLabelText)
else:
# Sync single class mode from PR#106
if self.singleClassMode.isChecked() and self.lastLabel:
text = self.lastLabel
else:
text = self.labelDialog.popUp(text=self.prevLabelText)
self.lastLabel = text
else:
text = self.defaultLabelTextLine.text()
# Add Chris
self.diffcButton.setChecked(False)
if text is not None:
self.prevLabelText = text
generate_color = generateColorByText(text)
shape = self.canvas.setLastLabel(text, generate_color, generate_color)
self.addLabel(shape)
if self.beginner(): # Switch to edit mode.
self.canvas.setEditing(True)
self.actions.create.setEnabled(True)
else:
self.actions.editMode.setEnabled(True)
self.setDirty()
if text not in self.labelHist:
self.labelHist.append(text)
else:
# self.canvas.undoLastLine()
self.canvas.resetAllLines()
def scrollRequest(self, delta, orientation):
units = - delta / (8 * 15)
bar = self.scrollBars[orientation]
bar.setValue(bar.value() + bar.singleStep() * units)
def setZoom(self, value):
self.actions.fitWidth.setChecked(False)
self.actions.fitWindow.setChecked(False)
self.zoomMode = self.MANUAL_ZOOM
self.zoomWidget.setValue(value)
def addZoom(self, increment=10):
self.setZoom(self.zoomWidget.value() + increment)
def zoomRequest(self, delta):
# get the current scrollbar positions
# calculate the percentages ~ coordinates
h_bar = self.scrollBars[Qt.Horizontal]
v_bar = self.scrollBars[Qt.Vertical]
# get the current maximum, to know the difference after zooming
h_bar_max = h_bar.maximum()
v_bar_max = v_bar.maximum()
# get the cursor position and canvas size
# calculate the desired movement from 0 to 1
# where 0 = move left
# 1 = move right
# up and down analogous
cursor = QCursor()
pos = cursor.pos()
relative_pos = QWidget.mapFromGlobal(self, pos)
cursor_x = relative_pos.x()
cursor_y = relative_pos.y()
w = self.scrollArea.width()
h = self.scrollArea.height()
# the scaling from 0 to 1 has some padding
# you don't have to hit the very leftmost pixel for a maximum-left movement
margin = 0.1
move_x = (cursor_x - margin * w) / (w - 2 * margin * w)
move_y = (cursor_y - margin * h) / (h - 2 * margin * h)
# clamp the values from 0 to 1
move_x = min(max(move_x, 0), 1)
move_y = min(max(move_y, 0), 1)
# zoom in
units = delta / (8 * 15)
scale = 10
self.addZoom(scale * units)
# get the difference in scrollbar values
# this is how far we can move
d_h_bar_max = h_bar.maximum() - h_bar_max
d_v_bar_max = v_bar.maximum() - v_bar_max
# get the new scrollbar values
new_h_bar_value = h_bar.value() + move_x * d_h_bar_max
new_v_bar_value = v_bar.value() + move_y * d_v_bar_max
h_bar.setValue(new_h_bar_value)
v_bar.setValue(new_v_bar_value)
def setFitWindow(self, value=True):
if value:
self.actions.fitWidth.setChecked(False)
self.zoomMode = self.FIT_WINDOW if value else self.MANUAL_ZOOM
self.adjustScale()
def setFitWidth(self, value=True):
if value:
self.actions.fitWindow.setChecked(False)
self.zoomMode = self.FIT_WIDTH if value else self.MANUAL_ZOOM
self.adjustScale()
def togglePolygons(self, value):
for item, shape in self.itemsToShapes.items():
item.setCheckState(Qt.Checked if value else Qt.Unchecked)
def loadFile(self, filePath=None):
"""Load the specified file, or the last opened file if None."""
self.resetState()
self.canvas.setEnabled(False)
self.imgPath = filePath + '\n'
if filePath is None:
filePath = self.settings.get(SETTING_FILENAME)
# Make sure that filePath is a regular python string, rather than QString
filePath = ustr(filePath)
# Fix bug: An index error after select a directory when open a new file.
unicodeFilePath = ustr(filePath)
unicodeFilePath = os.path.abspath(unicodeFilePath)
# Tzutalin 20160906 : Add file list and dock to move faster
# Highlight the file item
if unicodeFilePath and self.fileListWidget.count() > 0:
if unicodeFilePath in self.mImgList:
index = self.mImgList.index(unicodeFilePath)
fileWidgetItem = self.fileListWidget.item(index)
fileWidgetItem.setSelected(True)
else:
self.fileListWidget.clear()
self.mImgList.clear()
if unicodeFilePath and os.path.exists(unicodeFilePath):
if LabelFile.isLabelFile(unicodeFilePath):
try:
self.labelFile = LabelFile(unicodeFilePath)
except LabelFileError as e:
self.errorMessage(u'Error opening file',
(u"<p><b>%s</b></p>"
u"<p>Make sure <i>%s</i> is a valid label file.")
% (e, unicodeFilePath))
self.status("Error reading %s" % unicodeFilePath)
return False
self.imageData = self.labelFile.imageData
self.lineColor = QColor(*self.labelFile.lineColor)
self.fillColor = QColor(*self.labelFile.fillColor)
self.canvas.verified = self.labelFile.verified
else:
# Load image:
# read data first and store for saving into label file.
self.imageData = read(unicodeFilePath, None)
self.labelFile = None
self.canvas.verified = False
image = QImage.fromData(self.imageData)
if image.isNull():
self.errorMessage(u'Error opening file',
u"<p>Make sure <i>%s</i> is a valid image file." % unicodeFilePath)
self.status("Error reading %s" % unicodeFilePath)
return False
self.status("Loaded %s" % os.path.basename(unicodeFilePath))
self.image = image
self.filePath = unicodeFilePath
self.canvas.loadPixmap(QPixmap.fromImage(image))
if self.labelFile:
self.loadLabels(self.labelFile.shapes)
self.setClean()
self.canvas.setEnabled(True)
self.adjustScale(initial=True)
self.paintCanvas()
self.addRecentFile(self.filePath)
self.toggleActions(True)
# Label xml file and show bound box according to its filename
# if self.usingPascalVocFormat is True:
if self.defaultSaveDir is not None:
basename = os.path.basename(
os.path.splitext(self.filePath)[0])
xmlPath = os.path.join(self.defaultSaveDir, basename + XML_EXT)
txtPath = os.path.join(self.defaultSaveDir, basename + TXT_EXT)
"""Annotation file priority:
PascalXML > YOLO
"""
if os.path.isfile(xmlPath):
self.loadPascalXMLByFilename(xmlPath)
elif os.path.isfile(txtPath):
self.loadYOLOTXTByFilename(txtPath)
else:
xmlPath = os.path.splitext(filePath)[0] + XML_EXT
txtPath = os.path.splitext(filePath)[0] + TXT_EXT
if os.path.isfile(xmlPath):
self.loadPascalXMLByFilename(xmlPath)
elif os.path.isfile(txtPath):
self.loadYOLOTXTByFilename(txtPath)
self.setWindowTitle(__appname__ + ' ' + filePath)
# Default : select last item if there is at least one item
if self.labelList.count():
self.labelList.setCurrentItem(self.labelList.item(self.labelList.count() - 1))
self.labelList.item(self.labelList.count() - 1).setSelected(True)
self.canvas.setFocus(True)
return True
else:
            # When filePath does not exist, save the bad image path to err_img_path.txt
            print('{} is not a valid image file path.'.format(filePath))
absErrPath = os.path.dirname(self.txtPath)
saveErrTxtPath = os.path.join(absErrPath, 'err_img_path.txt')
err_img_data :Set[str] = set()
if os.path.exists(saveErrTxtPath):
with open(saveErrTxtPath,'r') as err_r:
err_img_data = set(err_r.readlines())
with open(saveErrTxtPath,'w') as err_w:
err_img_data.add(unicodeFilePath+'\n')
err_w.writelines(err_img_data)
self.filePath = unicodeFilePath
self.addRecentFile(self.filePath)
return False
def resizeEvent(self, event):
if self.canvas and not self.image.isNull() \
and self.zoomMode != self.MANUAL_ZOOM:
self.adjustScale()
super(MainWindow, self).resizeEvent(event)
def paintCanvas(self):
assert not self.image.isNull(), "cannot paint null image"
self.canvas.scale = 0.01 * self.zoomWidget.value()
self.canvas.adjustSize()
self.canvas.update()
def adjustScale(self, initial=False):
value = self.scalers[self.FIT_WINDOW if initial else self.zoomMode]()
self.zoomWidget.setValue(int(100 * value))
def scaleFitWindow(self):
"""Figure out the size of the pixmap in order to fit the main widget."""
e = 2.0 # So that no scrollbars are generated.
w1 = self.centralWidget().width() - e
h1 = self.centralWidget().height() - e
a1 = w1 / h1
# Calculate a new scale value based on the pixmap's aspect ratio.
w2 = self.canvas.pixmap.width() - 0.0
h2 = self.canvas.pixmap.height() - 0.0
a2 = w2 / h2
return w1 / w2 if a2 >= a1 else h1 / h2
def scaleFitWidth(self):
# The epsilon does not seem to work too well here.
w = self.centralWidget().width() - 2.0
return w / self.canvas.pixmap.width()
def closeEvent(self, event):
if not self.mayContinue():
event.ignore()
settings = self.settings
        # If it loads images from a dir, don't load it at the beginning
if self.dirname is None:
settings[SETTING_FILENAME] = self.filePath if self.filePath else ''
else:
settings[SETTING_FILENAME] = ''
settings[SETTING_WIN_SIZE] = self.size()
settings[SETTING_WIN_POSE] = self.pos()
settings[SETTING_WIN_STATE] = self.saveState()
settings[SETTING_LINE_COLOR] = self.lineColor
settings[SETTING_FILL_COLOR] = self.fillColor
settings[SETTING_RECENT_FILES] = self.recentFiles
settings[SETTING_ADVANCE_MODE] = not self._beginner
if self.defaultSaveDir and os.path.exists(self.defaultSaveDir):
settings[SETTING_SAVE_DIR] = ustr(self.defaultSaveDir)
else:
settings[SETTING_SAVE_DIR] = ''
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
settings[SETTING_LAST_OPEN_DIR] = self.lastOpenDir
else:
settings[SETTING_LAST_OPEN_DIR] = ''
settings[SETTING_AUTO_SAVE] = self.autoSaving.isChecked()
settings[SETTING_AUTO_SAVE_FORCE]=self.forceAutoSaving.isChecked()
settings[SETTING_SINGLE_CLASS] = self.singleClassMode.isChecked()
settings[SETTING_PAINT_LABEL] = self.displayLabelOption.isChecked()
settings[SETTING_DRAW_SQUARE] = self.drawSquaresOption.isChecked()
settings.save()
def setShortCutMode_slot(self):
        # TODO: decide later whether the previous setting should be remembered
if self.shortCutMode.isChecked():
_label, ok = QInputDialog.getText(self, '快速标注方式',
'使用设定的快捷键直接标注该类的框,格式为{快捷键}={类别名}, 以,号间隔,仅支持字母,注意不要设置\"awsdAWSD\".空值取消该模式,本模式和单例模式不兼容,启用成功会关闭单例模式',text=self.preShortCutModeSetting)
if ok:
if not _label:
self.shortCutModeSuccess(False)
return
else:
self.preShortCutModeSetting=_label
keyLabelMap=self.str2dict(_label)
if keyLabelMap is None:
warningbox=QMessageBox.warning(self,'输入的字符串不符合规则','输入的字符串不可尝试使用awsdAWSD作为快捷键!!!')
self.shortCutModeSuccess(False)
return
value_set=set(keyLabelMap.values())
label_set=set(self.labelHist)
                    # In case the user accidentally entered a class that is not in the predefined labels
if value_set-label_set:
reply=QMessageBox.question(self,'输入字符串有问题!',
"\n".join([
"输入字串中包含了预设标签中没有的类:",
repr(value_set-label_set),
"确定添加?",
"取消的话,预设标签中不包含的类将不会被设置为快捷方式",]),
QMessageBox.Yes|QMessageBox.No,
QMessageBox.No)
if reply ==QMessageBox.Yes:
self.shortCutModeKeyMap={getattr(Qt,'Key_'+k):v for k,v in keyLabelMap.items()}
else:
self.shortCutModeKeyMap={getattr(Qt,'Key_'+k):v for k,v in keyLabelMap.items() if v in label_set}
if not self.shortCutModeKeyMap:
warningbox=QMessageBox.warning(self,'吐槽','好好检查下输入字串,没有一个预设标签起作用哦!!!')
self.shortCutModeSuccess(False)
else:
self.shortCutModeSuccess(True)
else:
self.shortCutModeKeyMap={getattr(Qt,'Key_'+k):v for k,v in keyLabelMap.items()}
self.shortCutModeSuccess(True)
else:
self.shortCutModeSuccess(False)
def shortCutModeSuccess(self,sucess_flag:bool):
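        # Toggle shortcut-labelling mode; it is mutually exclusive with single-class mode, which is disabled on success.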
if sucess_flag:
self.shortCutMode.setChecked(True)
self.singleClassMode.setChecked(False)
self.singleClassMode.setCheckable(False)
self.lastLabel=None
self.status('启用快捷模式成功,单例模式将被强制关闭并禁用')
else:
self.shortCutModeKeyMap.clear()
self.singleClassMode.setChecked(False)
self.singleClassMode.setCheckable(True)
self.lastLabel=None
self.status('快捷模式取消,单例模式现在可用')
def loadRecent(self, filename):
if self.mayContinue():
self.loadFile(filename)
def scanAllImages(self, folderPath):
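        # Recursively collect every file with an image extension Qt can read, returned in natural sort order.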
extensions = ['.%s' % fmt.data().decode("ascii").lower() for fmt in QImageReader.supportedImageFormats()]
images = []
for root, dirs, files in os.walk(folderPath):
for file in files:
if file.lower().endswith(tuple(extensions)):
relativePath = os.path.join(root, file)
path = ustr(os.path.abspath(relativePath))
images.append(path)
natural_sort(images, key=lambda x: x.lower())
return images
def changeSavedirDialog(self, _value=False):
if self.defaultSaveDir is not None:
path = ustr(self.defaultSaveDir)
else:
path = '.'
dirpath = ustr(QFileDialog.getExistingDirectory(self,
'%s - Save annotations to the directory' % __appname__, path,
QFileDialog.ShowDirsOnly
| QFileDialog.DontResolveSymlinks))
if dirpath is not None and len(dirpath) > 1:
self.defaultSaveDir = dirpath
self.statusBar().showMessage('%s . Annotation will be saved to %s' %
('Change saved folder', self.defaultSaveDir))
self.statusBar().show()
def openAnnotationDialog(self, _value=False):
if self.filePath is None:
self.statusBar().showMessage('Please select image first')
self.statusBar().show()
return
path = os.path.dirname(ustr(self.filePath)) \
if self.filePath else '.'
if self.usingPascalVocFormat:
filters = "Open Annotation XML file (%s)" % ' '.join(['*.xml'])
filename = ustr(QFileDialog.getOpenFileName(self, '%s - Choose a xml file' % __appname__, path, filters))
if filename:
if isinstance(filename, (tuple, list)):
filename = filename[0]
self.loadPascalXMLByFilename(filename)
def openTxt(self, _value=False, dirpath=None, silent=False):
self.isTxt = True
if not self.mayContinue():
return
defaultOpenDirPath = dirpath if dirpath else '.'
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
defaultOpenDirPath = self.lastOpenDir
else:
defaultOpenDirPath = '.'
if silent != True:
filters = '{}'.format('*')
            # TODO: if defaultOpenDirPath is not a local path this dialog is very slow; the proper fix is to
            # subclass QFileDialog and make it asynchronous. As a dirty workaround for now,
            # the last opened folder is simply set to the folder that contains the txt file.
targetDirPath = ustr(QFileDialog.getOpenFileName(self,
'%s - Open Txt File' % __appname__, defaultOpenDirPath,
filters,""))
if not os.path.isfile(targetDirPath[0]):
if self.txtPath is not None:
targetDirPath = (self.txtPath, '*')
warningbox=QMessageBox.warning(self,'注意',
'你没有选中任何文件列表,这里将使用之前的文件列表作为当前文件列表, 之前文件列表路径为: \n {}'
.format(self.txtPath))
else:
warning =QMessageBox.warning(self,'警告','注意你没选中任何文件列表,且没有任何值能作为合理的文件列表')
# targetDirPath = ustr(QFileDialog.getOpenFileName(self,
# '%s - Open Txt File' % __appname__, defaultOpenDirPath,
# filters,"",QFileDialog.DontUseNativeDialog))
else:
targetDirPath = ustr(defaultOpenDirPath)
if os.path.isfile(targetDirPath[0]):
self.lastOpenDir=os.path.dirname(targetDirPath[0])
self.importTxtImages(targetDirPath)
def openDirDialog(self, _value=False, dirpath=None, silent=False):
self.isTxt = False
if not self.mayContinue():
return
defaultOpenDirPath = dirpath if dirpath else '.'
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
defaultOpenDirPath = self.lastOpenDir
else:
defaultOpenDirPath = os.path.dirname(self.filePath) if self.filePath else '.'
if silent != True:
targetDirPath = ustr(QFileDialog.getExistingDirectory(self,
'%s - Open Directory' % __appname__,
defaultOpenDirPath,
QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks))
else:
targetDirPath = ustr(defaultOpenDirPath)
self.importDirImages(targetDirPath)
def importTxtImages(self, txtFile):
if (not os.path.exists(txtFile[0])) or (not os.path.isfile(txtFile[0])):
            print('could not open the txt file, please check the txt path')
return
self.txtPath=txtFile[0]
with open(txtFile[0], 'r',encoding='utf-8') as fr:
lines = fr.readlines()
self.defaultSaveDir = None
self.fileListWidget.clear()
new_line = []
new_line = list(map(lambda x: x.strip('\n'), lines))
self.mImgList = new_line
for imgPath in self.mImgList:
item = QListWidgetItem(imgPath)
self.fileListWidget.addItem(item)
self.filePath = None
self.openNextImg()
def importDirImages(self, dirpath):
if not self.mayContinue() or not dirpath:
return
self.lastOpenDir = dirpath
self.dirname = dirpath
self.filePath = None
self.fileListWidget.clear()
self.mImgList = self.scanAllImages(dirpath)
self.openNextImg()
for imgPath in self.mImgList:
item = QListWidgetItem(imgPath)
self.fileListWidget.addItem(item)
def verifyImg(self, _value=False):
        # Proceed to the next image without a dialog if there are any labels
if self.filePath is not None:
try:
self.labelFile.toggleVerify()
except AttributeError:
                # If the labelling file does not exist yet, create it and
                # re-save it with the verified attribute.
self.saveFile()
if self.labelFile != None:
self.labelFile.toggleVerify()
else:
return
self.canvas.verified = self.labelFile.verified
self.paintCanvas()
self.saveFile()
def openPrevImg(self, _value=False):
        # Proceed to the previous image without a dialog if there are any labels
if self.autoSaving.isChecked():
if self.defaultSaveDir is not None:
if self.forceAutoSaving.isChecked() or self.dirty is True:
self.saveFile()
if self.isTxt is True:
                    # FIXME: this crashes when self.filePath is None, raising ValueError: None is not in list
currIndex = self.mImgList.index(self.filePath)
if currIndex - 1 >= 0:
filename = self.mImgList[currIndex - 1]
self.defaultSaveDir = filename.split(os.path.basename(filename))[0]
else:
if self.isTxt is True:
filename = None
if self.filePath is None:
filename = self.mImgList[0]
self.defaultSaveDir = filename.split(os.path.basename(filename))[0]
self.loadFile(filename)
else:
currIndex = self.mImgList.index(self.filePath)
if currIndex - 1 < len(self.mImgList):
filename = self.mImgList[currIndex - 1]
self.defaultSaveDir = filename.split(os.path.basename(filename))[0]
else:
self.changeSavedirDialog()
return
if not self.mayContinue():
return
if len(self.mImgList) <= 0:
return
if self.filePath is None:
return
currIndex = self.mImgList.index(self.filePath)
if currIndex - 1 >= 0:
filename = self.mImgList[currIndex - 1]
if filename:
self.loadFile(filename)
def openNextImg(self, _value=False):
        # Proceed to the next image without a dialog if there are any labels
if self.autoSaving.isChecked():
if self.defaultSaveDir is not None:
if self.forceAutoSaving.isChecked() or self.dirty is True:
self.saveFile()
if self.isTxt is True:
currIndex = self.mImgList.index(self.filePath)
if currIndex + 1 < len(self.mImgList):
filename = self.mImgList[currIndex + 1]
if filename:
self.defaultSaveDir = filename.split(os.path.basename(filename))[0]
else:
if self.isTxt is True:
filename = None
if self.filePath is None:
filename = self.mImgList[0]
self.defaultSaveDir = filename.split(os.path.basename(filename))[0]
self.loadFile(filename)
else:
currIndex = self.mImgList.index(self.filePath)
if currIndex + 1 < len(self.mImgList):
filename = self.mImgList[currIndex + 1]
self.defaultSaveDir = filename.split(os.path.basename(filename))[0]
else:
self.changeSavedirDialog()
return
if not self.mayContinue():
return
if len(self.mImgList) <= 0:
return
filename = None
if self.filePath is None:
filename = self.mImgList[0]
else:
currIndex = self.mImgList.index(self.filePath)
if currIndex + 1 < len(self.mImgList):
filename = self.mImgList[currIndex + 1]
if filename:
self.loadFile(filename)
def openNextImgTxt(self, _value=False):
        # Proceed to the next image without a dialog if there are any labels
if self.autoSaving.isChecked():
# self.defaultSaveDir = self.filePath.split(os.path.basename(self.filePath))[0]
if self.defaultSaveDir is not None:
if self.autoSaving.isChecked() or self.dirty is True:
self.saveFile()
else:
# self.changeSavedirDialog()
return
if not self.mayContinue():
return
if len(self.mImgList) <= 0:
return
filename = None
if self.filePath is None:
filename = self.mImgList[0]
else:
currIndex = self.mImgList.index(self.filePath)
if currIndex + 1 < len(self.mImgList):
filename = self.mImgList[currIndex + 1]
if filename:
self.loadFile(filename)
def openFile(self, _value=False):
self.isTxt = False
if not self.mayContinue():
return
path = os.path.dirname(ustr(self.filePath)) if self.filePath else '.'
formats = ['*.%s' % fmt.data().decode("ascii").lower() for fmt in QImageReader.supportedImageFormats()]
filters = "Image & Label files (%s)" % ' '.join(formats + ['*%s' % LabelFile.suffix])
filename = QFileDialog.getOpenFileName(self, '%s - Choose Image or Label file' % __appname__, path, filters)
if filename:
if isinstance(filename, (tuple, list)):
filename = filename[0]
self.loadFile(filename)
def saveFile(self, _value=False):
if self.defaultSaveDir is not None and len(ustr(self.defaultSaveDir)):
if self.filePath:
imgFileName = os.path.basename(self.filePath)
savedFileName = os.path.splitext(imgFileName)[0]
savedPath = os.path.join(ustr(self.defaultSaveDir), savedFileName)
self._saveFile(savedPath)
else:
imgFileDir = os.path.dirname(self.filePath)
imgFileName = os.path.basename(self.filePath)
savedFileName = os.path.splitext(imgFileName)[0]
savedPath = os.path.join(imgFileDir, savedFileName)
self._saveFile(savedPath if self.labelFile
else self.saveFileDialog(removeExt=False))
def saveFileAs(self, _value=False):
assert not self.image.isNull(), "cannot save empty image"
self._saveFile(self.saveFileDialog())
def saveFileDialog(self, removeExt=True):
caption = '%s - Choose File' % __appname__
filters = 'File (*%s)' % LabelFile.suffix
openDialogPath = self.currentPath()
dlg = QFileDialog(self, caption, openDialogPath, filters)
dlg.setDefaultSuffix(LabelFile.suffix[1:])
dlg.setAcceptMode(QFileDialog.AcceptSave)
filenameWithoutExtension = os.path.splitext(self.filePath)[0]
dlg.selectFile(filenameWithoutExtension)
dlg.setOption(QFileDialog.DontUseNativeDialog, False)
if dlg.exec_():
fullFilePath = ustr(dlg.selectedFiles()[0])
if removeExt:
return os.path.splitext(fullFilePath)[0] # Return file path without the extension.
else:
return fullFilePath
return ''
def _saveFile(self, annotationFilePath):
if annotationFilePath and self.saveLabels(annotationFilePath):
self.setClean()
self.statusBar().showMessage('Saved to %s' % annotationFilePath)
self.statusBar().show()
def closeFile(self, _value=False):
if not self.mayContinue():
return
self.resetState()
self.setClean()
self.toggleActions(False)
self.canvas.setEnabled(False)
self.actions.saveAs.setEnabled(False)
def resetAll(self):
self.settings.reset()
self.close()
proc = QProcess()
proc.startDetached(os.path.abspath(__file__))
def mayContinue(self):
return not (self.dirty and not self.discardChangesDialog())
def discardChangesDialog(self):
yes, no = QMessageBox.Yes, QMessageBox.No
msg = u'You have unsaved changes, proceed anyway?'
return yes == QMessageBox.warning(self, u'Attention', msg, yes | no)
def errorMessage(self, title, message):
return QMessageBox.critical(self, title,
'<p><b>%s</b></p>%s' % (title, message))
def currentPath(self):
return os.path.dirname(self.filePath) if self.filePath else '.'
def chooseColor1(self):
color = self.colorDialog.getColor(self.lineColor, u'Choose line color',
default=DEFAULT_LINE_COLOR)
if color:
self.lineColor = color
Shape.line_color = color
self.canvas.setDrawingColor(color)
self.canvas.update()
self.setDirty()
def deleteSelectedShape(self):
self.remLabel(self.canvas.deleteSelected())
self.setDirty()
if self.noShapes():
for action in self.actions.onShapesPresent:
action.setEnabled(False)
def chshapeLineColor(self):
color = self.colorDialog.getColor(self.lineColor, u'Choose line color',
default=DEFAULT_LINE_COLOR)
if color:
self.canvas.selectedShape.line_color = color
self.canvas.update()
self.setDirty()
def chshapeFillColor(self):
color = self.colorDialog.getColor(self.fillColor, u'Choose fill color',
default=DEFAULT_FILL_COLOR)
if color:
self.canvas.selectedShape.fill_color = color
self.canvas.update()
self.setDirty()
def copyShape(self):
self.canvas.endMove(copy=True)
self.addLabel(self.canvas.selectedShape)
self.setDirty()
def moveShape(self):
self.canvas.endMove(copy=False)
self.setDirty()
def loadPredefinedClasses(self, predefClassesFile):
if os.path.exists(predefClassesFile) is True:
with codecs.open(predefClassesFile, 'r', 'utf8') as f:
for line in f:
line = line.strip()
if self.labelHist is None:
self.labelHist = [line]
else:
self.labelHist.append(line)
def loadPascalXMLByFilename(self, xmlPath):
if self.filePath is None:
return
if os.path.isfile(xmlPath) is False:
return
self.set_format(FORMAT_PASCALVOC)
tVocParseReader = PascalVocReader(xmlPath)
shapes = tVocParseReader.getShapes()
self._cahed_xmlTree = tVocParseReader.getXmlTree()
self.loadLabels(shapes)
self.canvas.verified = tVocParseReader.verified
def loadYOLOTXTByFilename(self, txtPath):
if self.filePath is None:
return
if os.path.isfile(txtPath) is False:
return
self.set_format(FORMAT_YOLO)
tYoloParseReader = YoloReader(txtPath, self.image)
shapes = tYoloParseReader.getShapes()
print(shapes)
self.loadLabels(shapes)
self.canvas.verified = tYoloParseReader.verified
def togglePaintLabelsOption(self):
for shape in self.canvas.shapes:
shape.paintLabel = self.displayLabelOption.isChecked()
def toogleDrawSquare(self):
self.canvas.setDrawingShapeToSquare(self.drawSquaresOption.isChecked())
def inverted(color):
return QColor(*[255 - v for v in color.getRgb()])
def read(filename, default=None):
try:
with open(filename, 'rb') as f:
return f.read()
except:
return default
def get_main_app(argv=[]):
"""
Standard boilerplate Qt application code.
Do everything but app.exec_() -- so that we can test the application in one thread
"""
app = QApplication(argv)
app.setApplicationName(__appname__)
app.setWindowIcon(newIcon("app"))
    # Tzutalin 201705+: Accept extra arguments to change the predefined class file
# Usage : labelImg.py image predefClassFile saveDir
win = MainWindow(argv[1] if len(argv) >= 2 else None,
argv[2] if len(argv) >= 3 else os.path.join(
os.path.dirname(sys.argv[0]),
'data', 'predefined_classes.txt'),
argv[3] if len(argv) >= 4 else None)
win.show()
return app, win
def main():
'''construct main app and run it'''
app, _win = get_main_app(sys.argv)
return app.exec_()
if __name__ == '__main__':
sys.exit(main())
|
py | b40e2a6e2960890e429555f08b91e96bee9b6aec | # -*- coding: utf-8 -*-
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
from dash.exceptions import PreventUpdate
import pandas as pd
from app import app
# Get data
filename = 'assets/rtfMRI_methods_review_included_studies_procsteps.txt'
df_studies = pd.read_csv(filename, sep='\t', lineterminator='\r')
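# Drop any column that contains missing values.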
df_studies = df_studies.dropna(axis='columns')
df_plot = df_studies.copy()
colnames = {
'author':'Author',
'vendor': 'Vendor',
'magnet': 'Field strength',
'software': 'Software',
'stc': 'Slice time correction',
'mc': '3D volume realignment',
'ss': 'Spatial smoothing',
'dr': 'Drift removal',
'hmp': 'Realignment parameter regression',
'ts': 'Temporal smoothing',
'ff': 'Frequency filtering',
'or': 'Outlier removal',
'droi': 'Differential ROI',
'resp': 'Respiratory noise removal',
'doi': 'Article DOI'
}
plotnames = [
{'label': 'Vendor', 'value': 'vendor'},
{'label': 'Field strength', 'value': 'magnet'},
{'label': 'Software', 'value': 'software'},
{'label': 'Slice time correction', 'value': 'stc'},
{'label': '3D volume realignment', 'value': 'mc'},
{'label': 'Spatial smoothing', 'value': 'ss'},
{'label': 'Drift removal', 'value': 'dr'},
{'label': 'Realignment parameter regression', 'value': 'hmp'},
{'label': 'Temporal smoothing', 'value': 'ts'},
{'label': 'Frequency filtering', 'value': 'ff'},
{'label': 'Outlier removal', 'value': 'or'},
{'label': 'Differential ROI', 'value': 'droi'},
{'label': 'Respiratory noise removal', 'value': 'resp'},
]
srs = df_plot['vendor'].value_counts()
xx = srs.index.to_list()
yy = srs.values
dataframe = df_plot.loc[df_plot['vendor'] == 'Siemens']
srs2 = dataframe['magnet'].value_counts()
xx2 = srs2.index.to_list()
yy2 = srs2.values
main_md = dcc.Markdown('''
In this section you can visualize and interact with the coded data from 128 real-time fMRI neurofeedback studies.
There are two plots below; each one displays the data for a method selected from its dropdown.
Say you want to view the distribution of scanner vendors used in these studies: select the `Vendor` option for the plot on the left-hand side.
You can then *hover* over each of the bars in the plot to see the actual number of studies per vendor, e.g. 18 studies used a Philips scanner.
You can also *click* on a bar to display those specific studies in a table below the plots.
Say, now, that you want to see which software packages were used for each of the vendors: select the `Software` option for the plot on the right-hand side.
By hovering over each bar on the `Vendor` plot, the `Software` plot will update with the relevant distribution.
''')
layout = html.Div([
html.Div([
html.H2('Visualize'),
],
style={
'marginBottom': 25,
'marginTop': 25,
'marginLeft': '5%',
'maxWidth': '90%',
'textAlign': 'center'
}
),
html.Div(main_md,
style={
'marginLeft': '5%',
'maxWidth': '90%',
}
),
html.Br([]),
html.Div(
[
dbc.Row(
[
dbc.Col(dcc.Dropdown(
id='drop-1',
options=plotnames,
value='vendor',
),
width={"size": 4, "offset": 1}, # figure out offset
),
dbc.Col(dcc.Dropdown(
id='drop-2',
options=plotnames,
value='vendor',
),
width={"size": 4, "offset": 2},
),
],
justify="start"
),
html.Br([]),
dbc.Row(
[
dbc.Col(html.H6(
id='graph-1-title',
children='Vendor (hover to show options of second feature; click to display studies)',
style={
'textAlign': 'center',
}),
# width={"size": 6, "offset": 3}
),
dbc.Col(html.H6(
id='graph-2-title',
children='Field strength options when Vendor = Siemens',
style={
'textAlign': 'center',
}),
# width={"size": 6, "offset": 3}
)
]
),
dbc.Row(
[
dbc.Col(html.Div(
dcc.Graph(
id='graph-1',
figure={
'data': [
{'x': xx, 'y': yy, 'type': 'bar', 'name': 'Vendors', 'marker': {'color': '#9EBC9F'}},
],
}
),
)),
dbc.Col(html.Div(
dcc.Graph(
id='graph-2',
figure={
'data': [
{'x': xx2, 'y': yy2, 'type': 'bar', 'name': 'Field strength', 'marker': {'color': '#D3B88C'}},
],
}
),
)),
]
),
],
style={
'marginBottom': 25,
'marginTop': 25,
'marginLeft': '5%',
'maxWidth': '90%',
}
),
html.Div(
id='table-1',
style={
'marginBottom': 25,
'marginTop': 25,
'marginLeft': '5%',
'maxWidth': '90%',
}
)
])
# Callback for updating graph 1
@app.callback(
[Output('graph-1', 'figure'),
Output('graph-1-title', 'children')],
[Input('drop-1','value')]
)
def update_graph(feature):
srs = df_plot[feature].value_counts()
xx = srs.index.to_list()
yy = srs.values
txt = colnames[feature]
fig={
'data': [
{'x': xx, 'y': yy, 'type': 'bar', 'name': txt, 'marker': {'color': '#9EBC9F'}},
],
}
title = txt + ' (hover to show options of second feature; click to display studies)'
return [fig, title]
# Callback for updating dropdown2 based on dropdown1 value
@app.callback(
[Output('drop-2', 'options'),
Output('drop-2', 'value')],
[Input('drop-1','value')]
)
def reset_dropdown2_opts(value):
plotnames_2 = [x for x in plotnames if x['value'] != value]
value_2 = plotnames_2[0]['value']
return plotnames_2, value_2
# Callback for updating graph 2 based on graph1 hoverData and dropdowns
@app.callback(
[Output('graph-2', 'figure'),
Output('graph-2-title', 'children')],
[Input('graph-1', 'hoverData'),
Input('drop-1','value'),
Input('drop-2','value')]
)
def update_graph_2(hoverData, feature1, feature2):
if hoverData is None or feature1 is None or feature2 is None:
raise PreventUpdate
else:
x = hoverData['points'][0]['x']
dataframe = df_plot.loc[df_plot[feature1] == x]
srs = dataframe[feature2].value_counts()
xx = srs.index.to_list()
yy = srs.values
txt = colnames[feature2] + ' options when ' + colnames[feature1] + ' = ' + x
fig={
'data': [
{'x': xx, 'y': yy, 'type': 'bar', 'name': txt, 'marker': {'color': '#D3B88C'}},
],
}
title = txt
return [fig, title]
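# Illustrative hoverData payload (field values assumed; structure follows the
# Dash/Plotly convention for bar charts):
#     {'points': [{'curveNumber': 0, 'pointIndex': 0, 'x': 'Siemens', 'y': 70}]}
# Only points[0]['x'] is read above to filter df_plot before recounting feature2.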
# Callback for showing table 1 after filtering on feature 1
@app.callback(
Output('table-1', 'children'),
[Input('graph-1', 'clickData'),
Input('drop-1','value')])
def generate_table(clickData, feature, max_rows=20):
if clickData is None:
raise PreventUpdate
else:
x = clickData['points'][0]['x']
dataframe = df_plot.loc[df_plot[feature] == x]
table=html.Table([
html.Thead(
html.Tr([html.Th(col) for col in list(colnames.values())])
),
html.Tbody([
html.Tr([
html.Td(writeElement(i, col, dataframe)) for col in dataframe.columns],
) for i in range(min(len(dataframe), max_rows))
]),
],
className='qcsummary',
)
# class="table-row" data-href="http://tutorialsplane.com"
heading=html.H4('Showing studies where ' + colnames[feature] + ' = ' + x,
style={'textAlign': 'center',})
# table = dbc.Table.from_dataframe(dataframe,
# striped=True,
# bordered=True,
# hover=True,
# responsive=True,
# className='qcsummary'
# )
return [heading, table]
def writeElement(i, col, dataframe):
if col == 'doi':
hrf = 'https://doi.org/'+dataframe.iloc[i][col]
return html.A([dataframe.iloc[i][col]], href=hrf, target="_blank")
else:
return dataframe.iloc[i][col] |
py | b40e2a76ec38fcd32c65ae58356c4631c3256b9d | from flask import Blueprint, url_for
from flask_api import status
blueprint = Blueprint('api_root', __name__)
@blueprint.route("/api")
def index():
content = {'rooms': url_for('api_rooms.index', _external=True)}
return content, status.HTTP_200_OK
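# Hedged sketch (application setup is assumed, not part of this module): the
# blueprint only serves requests once registered on a Flask-API app; the
# api_rooms blueprint referenced by url_for above would be registered the same way.
def _example_create_app():
    from flask_api import FlaskAPI
    app = FlaskAPI(__name__)
    app.register_blueprint(blueprint)
    return app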
|
py | b40e2be4e11b6ba0330e13940d816b5672493d33 | # -*- coding: utf-8 -*-
#
# format documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 18 06:46:16 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, re, subprocess
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
#
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
libpath = os.path.join(curr_path, '../python/')
sys.path.insert(0, libpath)
sys.path.insert(0, curr_path)
from sphinx_util import MarkdownParser, AutoStructify
# -- mock out modules
import mock
MOCK_MODULES = ['numpy', 'scipy', 'scipy.sparse', 'sklearn', 'matplotlib']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.2'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.ifconfig', 'breathe']
# breathe_default_project = "format"
breathe_domain_by_extension = {"h" : "cpp"}
# General information about the project.
project = u'mxnet'
author = u'%s developers' % project
copyright = u'2015, %s' % author
github_doc_root = 'https://github.com/dmlc/mxnet/tree/master/doc/'
doc_root = 'http://mxnet.dmlc.ml/'
# add markdown parser
MarkdownParser.github_doc_root = github_doc_root
source_parsers = {
'.md': MarkdownParser,
'.Rmd': MarkdownParser,
}
os.environ['MXNET_BUILD_DOC'] = '1'
# Version information.
# import mxnet
# version = mxnet.__version__
# release = mxnet.__version__
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'breathe',
]
# Use breathe to include doxygen documents
breathe_projects = {'mxnet' : 'doxygen/xml/'}
breathe_default_project = 'mxnet'
autodoc_member_order = 'bysource'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
# source_suffix = '.rst'
source_suffix = ['.rst', '.md', '.Rmd']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Version and release are passed from CMake.
#version = None
# The full version, including alpha/beta/rc tags.
#release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['virtualenv']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'c++'
primary_domain = 'cpp'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'mxnet-theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_static']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': 'relations.html'
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'formatdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'format.tex', u'C++ Format Documentation',
u'Victor Zverovich', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'format', u'format Documentation',
[u'Victor Zverovich'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'format', u'format Documentation',
u'Victor Zverovich', 'format', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# hook for doxygen
def run_doxygen(folder):
"""Run the doxygen make command in the designated folder."""
try:
retcode = subprocess.call("cd %s; make doxygen" % folder, shell=True)
if retcode < 0:
sys.stderr.write("doxygen terminated by signal %s" % (-retcode))
except OSError as e:
sys.stderr.write("doxygen execution failed: %s" % e)
def generate_doxygen_xml(app):
"""Run the doxygen make commands"""
run_doxygen('..')
def setup(app):
# Add hook for building doxygen xml when needed
# no c++ API for now
app.connect("builder-inited", generate_doxygen_xml)
app.add_config_value('recommonmark_config', {
'url_resolver': lambda url: doc_root + url,
}, True)
app.add_transform(AutoStructify)
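# Hedged note (command and paths are illustrative): building the docs with this
# configuration, e.g.
#
#     sphinx-build -b html doc doc/_build/html
#
# fires the "builder-inited" hook registered above, which in turn runs
# `make doxygen` in the parent directory so Breathe can read doxygen/xml/.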
|
py | b40e2bece01b3bee9dbc8055f61325fbc8594ca7 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class RebootInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ens', '2017-11-10', 'RebootInstance','ens')
self.set_method('POST')
def get_ForceStop(self):
return self.get_query_params().get('ForceStop')
def set_ForceStop(self,ForceStop):
self.add_query_param('ForceStop',ForceStop)
def get_Version(self):
return self.get_query_params().get('Version')
def set_Version(self,Version):
self.add_query_param('Version',Version)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId) |
py | b40e2c52bdfb1c5b0d36c59f7f68e0501280933c | from django.conf.urls import url
#import modules for spyne (wsdl interface)
from spyne.protocol.soap import Soap11
from spyne.server.django import DjangoView
#import modules for authentication
from django.contrib.auth.decorators import login_required
from proj.pred import views
urlpatterns = [
url(r'^$', views.index, name='pred.index'),
url(r'^submit-seq/$', views.submit_seq, name='pred.submit_seq'),
url(r'^thanks/$', views.thanks, name='pred.thanks'),
url(r'^queue/$', views.get_queue, name='pred.get_queue'),
url(r'^running/$', views.get_running, name='pred.get_running'),
url(r'^finished/$', views.get_finished_job, name='pred.get_finished_job'),
url(r'^failed/$', views.get_failed_job, name='pred.get_failed_job'),
url(r'^download/$', views.download, name='pred.download'),
url(r'^help-wsdl-api/$', views.help_wsdl_api, name='pred.help_wsdl_api'),
url(r'^help/$', views.get_help, name='pred.get_help'),
url(r'^news/$', views.get_news, name='pred.get_news'),
url(r'^serverstatus/$', views.get_serverstatus, name='pred.get_serverstatus'),
url(r'^reference/$', views.get_reference, name='pred.get_reference'),
url(r'^example/$', views.get_example, name='pred.get_example'),
url(r'^oldserver/$', views.oldserver, name='pred.oldserver'),
url(r'^result/(?P<jobid>[^\/]+)/$', views.get_results, name='pred.get_results'),
url(r'^result/(?P<jobid>[^\/]+)/(?P<seqindex>seq_[0-9]+)/$',
views.get_results_eachseq, name='pred.get_results_eachseq'),
url(r'^login/', login_required(views.login), name="pred.login"),
# for spyne wsdl
url(r'^api_submitseq/', DjangoView.as_view(application=views.app_submitseq)),
]
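# Hedged usage sketch (the jobid value is illustrative, and the final path
# depends on how these patterns are included in the project urlconf):
#
#     from django.urls import reverse
#     reverse('pred.get_results', kwargs={'jobid': 'rst_abc123'})
#     # -> '.../result/rst_abc123/'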
|
py | b40e2d90869ecfec73db10e477acf5ce037d4a6c | from __future__ import annotations
import copy
import itertools
from typing import (
TYPE_CHECKING,
Dict,
List,
Sequence,
)
import numpy as np
from pandas._libs import internals as libinternals
from pandas._typing import (
ArrayLike,
DtypeObj,
Manager,
Shape,
)
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.cast import (
ensure_dtype_can_hold_na,
find_common_type,
)
from pandas.core.dtypes.common import (
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_sparse,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.missing import (
is_valid_na_for_dtype,
isna_all,
)
import pandas.core.algorithms as algos
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
)
from pandas.core.internals.array_manager import ArrayManager
from pandas.core.internals.blocks import (
ensure_block_shape,
new_block,
)
from pandas.core.internals.managers import BlockManager
if TYPE_CHECKING:
from pandas import Index
def _concatenate_array_managers(
mgrs_indexers, axes: List[Index], concat_axis: int, copy: bool
) -> Manager:
"""
Concatenate array managers into one.
Parameters
----------
mgrs_indexers : list of (ArrayManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
Returns
-------
ArrayManager
"""
# reindex all arrays
mgrs = []
for mgr, indexers in mgrs_indexers:
for ax, indexer in indexers.items():
mgr = mgr.reindex_indexer(axes[ax], indexer, axis=ax, allow_dups=True)
mgrs.append(mgr)
if concat_axis == 1:
# concatting along the rows -> concat the reindexed arrays
# TODO(ArrayManager) doesn't yet preserve the correct dtype
arrays = [
concat_compat([mgrs[i].arrays[j] for i in range(len(mgrs))])
for j in range(len(mgrs[0].arrays))
]
return ArrayManager(arrays, [axes[1], axes[0]], verify_integrity=False)
else:
# concatting along the columns -> combine reindexed arrays in a single manager
assert concat_axis == 0
arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs]))
return ArrayManager(arrays, [axes[1], axes[0]], verify_integrity=False)
def concatenate_managers(
mgrs_indexers, axes: List[Index], concat_axis: int, copy: bool
) -> Manager:
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
Returns
-------
BlockManager
"""
# TODO(ArrayManager) this assumes that all managers are of the same type
if isinstance(mgrs_indexers[0][0], ArrayManager):
return _concatenate_array_managers(mgrs_indexers, axes, concat_axis, copy)
concat_plans = [
_get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers
]
concat_plan = _combine_concat_plans(concat_plans, concat_axis)
blocks = []
for placement, join_units in concat_plan:
if len(join_units) == 1 and not join_units[0].indexers:
b = join_units[0].block
values = b.values
if copy:
values = values.copy()
else:
values = values.view()
b = b.make_block_same_class(values, placement=placement)
elif _is_uniform_join_units(join_units):
blk = join_units[0].block
vals = [ju.block.values for ju in join_units]
if not blk.is_extension:
# _is_uniform_join_units ensures a single dtype, so
# we can use np.concatenate, which is more performant
# than concat_compat
values = np.concatenate(vals, axis=blk.ndim - 1)
else:
# TODO(EA2D): special-casing not needed with 2D EAs
values = concat_compat(vals)
if not isinstance(values, ExtensionArray):
values = values.reshape(1, len(values))
if blk.values.dtype == values.dtype:
# Fast-path
b = blk.make_block_same_class(values, placement=placement)
else:
b = new_block(values, placement=placement, ndim=blk.ndim)
else:
new_values = _concatenate_join_units(join_units, concat_axis, copy=copy)
b = new_block(new_values, placement=placement, ndim=len(axes))
blocks.append(b)
return BlockManager(blocks, axes)
def _get_mgr_concatenation_plan(mgr: BlockManager, indexers: Dict[int, np.ndarray]):
"""
Construct concatenation plan for given block manager and indexers.
Parameters
----------
mgr : BlockManager
indexers : dict of {axis: indexer}
Returns
-------
plan : list of (BlockPlacement, JoinUnit) tuples
"""
# Calculate post-reindex shape, save for item axis which will be separate
# for each block anyway.
mgr_shape_list = list(mgr.shape)
for ax, indexer in indexers.items():
mgr_shape_list[ax] = len(indexer)
mgr_shape = tuple(mgr_shape_list)
if 0 in indexers:
ax0_indexer = indexers.pop(0)
blknos = algos.take_nd(mgr.blknos, ax0_indexer, fill_value=-1)
blklocs = algos.take_nd(mgr.blklocs, ax0_indexer, fill_value=-1)
else:
if mgr.is_single_block:
blk = mgr.blocks[0]
return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
# error: Incompatible types in assignment (expression has type "None", variable
# has type "ndarray")
ax0_indexer = None # type: ignore[assignment]
blknos = mgr.blknos
blklocs = mgr.blklocs
plan = []
for blkno, placements in libinternals.get_blkno_placements(blknos, group=False):
assert placements.is_slice_like
join_unit_indexers = indexers.copy()
shape_list = list(mgr_shape)
shape_list[0] = len(placements)
shape = tuple(shape_list)
if blkno == -1:
unit = JoinUnit(None, shape)
else:
blk = mgr.blocks[blkno]
ax0_blk_indexer = blklocs[placements.indexer]
unit_no_ax0_reindexing = (
len(placements) == len(blk.mgr_locs)
and
# Fastpath detection of join unit not
# needing to reindex its block: no ax0
# reindexing took place and block
# placement was sequential before.
(
(
ax0_indexer is None
and blk.mgr_locs.is_slice_like
and blk.mgr_locs.as_slice.step == 1
)
or
# Slow-ish detection: all indexer locs
# are sequential (and length match is
# checked above).
(np.diff(ax0_blk_indexer) == 1).all()
)
)
# Omit indexer if no item reindexing is required.
if unit_no_ax0_reindexing:
join_unit_indexers.pop(0, None)
else:
join_unit_indexers[0] = ax0_blk_indexer
unit = JoinUnit(blk, shape, join_unit_indexers)
plan.append((placements, unit))
return plan
class JoinUnit:
def __init__(self, block, shape: Shape, indexers=None):
# Passing shape explicitly is required for cases when block is None.
if indexers is None:
indexers = {}
self.block = block
self.indexers = indexers
self.shape = shape
def __repr__(self) -> str:
return f"{type(self).__name__}({repr(self.block)}, {self.indexers})"
@cache_readonly
def needs_filling(self) -> bool:
for indexer in self.indexers.values():
# FIXME: cache results of indexer == -1 checks.
if (indexer == -1).any():
return True
return False
@cache_readonly
def dtype(self):
blk = self.block
if blk is None:
raise AssertionError("Block is None, no dtype")
if not self.needs_filling:
return blk.dtype
return ensure_dtype_can_hold_na(blk.dtype)
def is_valid_na_for(self, dtype: DtypeObj) -> bool:
"""
Check that we are all-NA of a type/dtype that is compatible with this dtype.
Augments `self.is_na` with an additional check of the type of NA values.
"""
if not self.is_na:
return False
if self.block is None:
return True
if self.dtype == object:
values = self.block.values
return all(is_valid_na_for_dtype(x, dtype) for x in values.ravel(order="K"))
if self.dtype.kind == dtype.kind == "M" and not is_dtype_equal(
self.dtype, dtype
):
# fill_values match but we should not cast self.block.values to dtype
return False
na_value = self.block.fill_value
return is_valid_na_for_dtype(na_value, dtype)
@cache_readonly
def is_na(self) -> bool:
if self.block is None:
return True
if not self.block._can_hold_na:
return False
# Usually it's enough to check only a small fraction of values to see if
# a block is NOT null; chunks should help in such cases. The 1000 value
# was chosen rather arbitrarily.
values = self.block.values
if is_sparse(self.block.values.dtype):
return False
elif self.block.is_extension:
# TODO(EA2D): no need for special case with 2D EAs
values_flat = values
else:
values_flat = values.ravel(order="K")
return isna_all(values_flat)
def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike:
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
values = self.block.get_values()
else:
fill_value = upcasted_na
if self.is_valid_na_for(empty_dtype):
blk_dtype = getattr(self.block, "dtype", None)
if blk_dtype == np.dtype("object"):
# we want to avoid filling with np.nan if we are
# using None; we already know that we are all
# nulls
values = self.block.values.ravel(order="K")
if len(values) and values[0] is None:
fill_value = None
if is_datetime64tz_dtype(empty_dtype):
# TODO(EA2D): special case unneeded with 2D EAs
i8values = np.full(self.shape[1], fill_value.value)
return DatetimeArray(i8values, dtype=empty_dtype)
elif is_extension_array_dtype(blk_dtype):
pass
elif isinstance(empty_dtype, ExtensionDtype):
cls = empty_dtype.construct_array_type()
missing_arr = cls._from_sequence([], dtype=empty_dtype)
ncols, nrows = self.shape
assert ncols == 1, ncols
empty_arr = -1 * np.ones((nrows,), dtype=np.intp)
return missing_arr.take(
empty_arr, allow_fill=True, fill_value=fill_value
)
else:
# NB: we should never get here with empty_dtype integer or bool;
# if we did, the missing_arr.fill would cast to gibberish
missing_arr = np.empty(self.shape, dtype=empty_dtype)
missing_arr.fill(fill_value)
return missing_arr
if (not self.indexers) and (not self.block._can_consolidate):
# preserve these for validation in concat_compat
return self.block.values
if self.block.is_bool and not self.block.is_categorical:
# External code requested filling/upcasting, bool values must
# be upcasted to object to avoid being upcasted to numeric.
values = self.block.astype(np.object_).values
elif self.block.is_extension:
values = self.block.values
else:
# No dtype upcasting is done here, it will be performed during
# concatenation itself.
values = self.block.values
if not self.indexers:
# If there's no indexing to be done, we want to signal outside
# code that this array must be copied explicitly. This is done
# by returning a view and checking `retval.base`.
values = values.view()
else:
for ax, indexer in self.indexers.items():
values = algos.take_nd(values, indexer, axis=ax)
return values
def _concatenate_join_units(
join_units: List[JoinUnit], concat_axis: int, copy: bool
) -> ArrayLike:
"""
Concatenate values from several join units along selected axis.
"""
if concat_axis == 0 and len(join_units) > 1:
# Concatenating join units along ax0 is handled in _merge_blocks.
raise AssertionError("Concatenating join units along axis0")
empty_dtype = _get_empty_dtype(join_units)
has_none_blocks = any(unit.block is None for unit in join_units)
upcasted_na = _dtype_to_na_value(empty_dtype, has_none_blocks)
to_concat = [
ju.get_reindexed_values(empty_dtype=empty_dtype, upcasted_na=upcasted_na)
for ju in join_units
]
if len(to_concat) == 1:
# Only one block, nothing to concatenate.
concat_values = to_concat[0]
if copy:
if isinstance(concat_values, np.ndarray):
# non-reindexed (=not yet copied) arrays are made into a view
# in JoinUnit.get_reindexed_values
if concat_values.base is not None:
concat_values = concat_values.copy()
else:
concat_values = concat_values.copy()
elif any(isinstance(t, ExtensionArray) for t in to_concat):
# concatting with at least one EA means we are concatting a single column
# the non-EA values are 2D arrays with shape (1, n)
to_concat = [t if isinstance(t, ExtensionArray) else t[0, :] for t in to_concat]
concat_values = concat_compat(to_concat, axis=0, ea_compat_axis=True)
concat_values = ensure_block_shape(concat_values, 2)
else:
concat_values = concat_compat(to_concat, axis=concat_axis)
return concat_values
def _dtype_to_na_value(dtype: DtypeObj, has_none_blocks: bool):
"""
Find the NA value to go with this dtype.
"""
if isinstance(dtype, ExtensionDtype):
return dtype.na_value
elif dtype.kind in ["m", "M"]:
return dtype.type("NaT")
elif dtype.kind in ["f", "c"]:
return dtype.type("NaN")
elif dtype.kind == "b":
return None
elif dtype.kind in ["i", "u"]:
if not has_none_blocks:
return None
return np.nan
elif dtype.kind == "O":
return np.nan
raise NotImplementedError
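# Illustrative mapping (per the branches above): datetime/timedelta dtypes map
# to NaT, float/complex to NaN, object to np.nan; bool returns None, and
# integer dtypes return None unless an all-missing (None) block is present,
# in which case np.nan is used so the result can hold NAs.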
def _get_empty_dtype(join_units: Sequence[JoinUnit]) -> DtypeObj:
"""
Return dtype and N/A values to use when concatenating specified units.
Returned N/A value may be None which means there was no casting involved.
Returns
-------
dtype
"""
if len(join_units) == 1:
blk = join_units[0].block
if blk is None:
return np.dtype(np.float64)
if _is_uniform_reindex(join_units):
# FIXME: integrate property
empty_dtype = join_units[0].block.dtype
return empty_dtype
has_none_blocks = any(unit.block is None for unit in join_units)
dtypes = [
unit.dtype for unit in join_units if unit.block is not None and not unit.is_na
]
if not len(dtypes):
dtypes = [unit.dtype for unit in join_units if unit.block is not None]
dtype = find_common_type(dtypes)
if has_none_blocks:
dtype = ensure_dtype_can_hold_na(dtype)
return dtype
def _is_uniform_join_units(join_units: List[JoinUnit]) -> bool:
"""
Check if the join units consist of blocks of uniform type that can
be concatenated using Block.concat_same_type instead of the generic
_concatenate_join_units (which uses `concat_compat`).
"""
# TODO: require dtype match in addition to same type? e.g. DatetimeTZBlock
# cannot necessarily join
return (
# all blocks need to have the same type
all(type(ju.block) is type(join_units[0].block) for ju in join_units) # noqa
and
# no blocks that would get missing values (can lead to type upcasts)
# unless we're an extension dtype.
all(not ju.is_na or ju.block.is_extension for ju in join_units)
and
# no blocks with indexers (as then the dimensions do not fit)
all(not ju.indexers for ju in join_units)
and
# only use this path when there is something to concatenate
len(join_units) > 1
)
def _is_uniform_reindex(join_units) -> bool:
return (
# TODO: should this be ju.block._can_hold_na?
all(ju.block and ju.block.is_extension for ju in join_units)
and len({ju.block.dtype.name for ju in join_units}) == 1
)
def _trim_join_unit(join_unit: JoinUnit, length: int) -> JoinUnit:
"""
Reduce join_unit's shape along item axis to length.
Extra items that didn't fit are returned as a separate block.
"""
if 0 not in join_unit.indexers:
extra_indexers = join_unit.indexers
if join_unit.block is None:
extra_block = None
else:
extra_block = join_unit.block.getitem_block(slice(length, None))
join_unit.block = join_unit.block.getitem_block(slice(length))
else:
extra_block = join_unit.block
extra_indexers = copy.copy(join_unit.indexers)
extra_indexers[0] = extra_indexers[0][length:]
join_unit.indexers[0] = join_unit.indexers[0][:length]
extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
join_unit.shape = (length,) + join_unit.shape[1:]
return JoinUnit(block=extra_block, indexers=extra_indexers, shape=extra_shape)
def _combine_concat_plans(plans, concat_axis: int):
"""
Combine multiple concatenation plans into one.
existing_plan is updated in-place.
"""
if len(plans) == 1:
for p in plans[0]:
yield p[0], [p[1]]
elif concat_axis == 0:
offset = 0
for plan in plans:
last_plc = None
for plc, unit in plan:
yield plc.add(offset), [unit]
last_plc = plc
if last_plc is not None:
offset += last_plc.as_slice.stop
else:
num_ended = [0]
def _next_or_none(seq):
retval = next(seq, None)
if retval is None:
num_ended[0] += 1
return retval
plans = list(map(iter, plans))
next_items = list(map(_next_or_none, plans))
while num_ended[0] != len(next_items):
if num_ended[0] > 0:
raise ValueError("Plan shapes are not aligned")
placements, units = zip(*next_items)
lengths = list(map(len, placements))
min_len, max_len = min(lengths), max(lengths)
if min_len == max_len:
yield placements[0], units
next_items[:] = map(_next_or_none, plans)
else:
yielded_placement = None
yielded_units = [None] * len(next_items)
for i, (plc, unit) in enumerate(next_items):
yielded_units[i] = unit
if len(plc) > min_len:
# _trim_join_unit updates unit in place, so only
# placement needs to be sliced to skip min_len.
next_items[i] = (plc[min_len:], _trim_join_unit(unit, min_len))
else:
yielded_placement = plc
next_items[i] = _next_or_none(plans[i])
yield yielded_placement, yielded_units
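# Hedged illustration (example values assumed; not part of pandas itself): the
# machinery above backs ordinary concatenation calls such as
#
#     import pandas as pd
#     df1 = pd.DataFrame({"a": [1, 2]})
#     df2 = pd.DataFrame({"a": [3.0], "b": ["x"]})
#     out = pd.concat([df1, df2])
#
# where the mismatched columns introduce missing values and
# _get_empty_dtype / find_common_type decide the upcast dtype per block.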
|
py | b40e302b8903dd29f8a10575231826afab1fc3ad | from ._generic_client_interceptor import _GenericClientInterceptor
from . import _header_client_interceptor
def get_authorization_interceptor(auth_token: str) -> _GenericClientInterceptor:
return _header_client_interceptor.header_adder_interceptor(
"authorization", auth_token
)
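# Hedged usage sketch (target address and token are placeholders):
#
#     import grpc
#     channel = grpc.insecure_channel("localhost:50051")
#     channel = grpc.intercept_channel(
#         channel, get_authorization_interceptor("Bearer <token>")
#     )
#     # stubs created from this channel now attach the authorization header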
|
py | b40e3180e56786e05c635b4dd81ee3405896ab80 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext as _
from django.db import models
from django.contrib.auth.models import User
def upload_path(instance, filename):
return '/'.join(['fdp', str(instance.mois), str(instance.structure), filename])
class Paie(models.Model):
mois = models.CharField(max_length=32, blank=False, null=False)
fichier = models.FileField(blank=True, null=True, upload_to=upload_path)
author = models.CharField(max_length=32, blank=False, null=False)
structure = models.CharField(max_length=32, blank=True, null=True)
created_on = models.DateTimeField(auto_now_add=True, null=True, blank=True)
class Meta:
ordering = ['-created_on']
def __str__(self):
return self.mois
class Consultant(models.Model):
STATUS = (
('WGT', _('Portage')),
('WG', _('CDI')),
('WGS', _('JEI')),
)
nom = models.CharField(max_length=50, blank=False, null=False)
prenom = models.CharField(max_length=50, blank=True, null=True)
email = models.CharField(max_length=50, blank=False, null=False)
type = models.CharField(max_length=32, choices=STATUS, default='WGT')
created_on = models.DateTimeField(auto_now_add=True, null=True, blank=True)
def upload_path_fdp(instance, filename):
return '/'.join(['fdp', str(instance.mois), str(instance.structure), str(instance.name), filename])
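# Illustrative result (field values assumed): a Log with mois='2021-05',
# structure='WG', name='dupont' and an uploaded file named 'fdp.pdf' is stored
# under 'fdp/2021-05/WG/dupont/fdp.pdf'.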
class Log(models.Model):
STATUS = (
('OK', _('OK')),
('KO', _('KO')),
)
name = models.CharField(max_length=50, blank=False, null=False)
email = models.CharField(max_length=50, blank=False, null=False)
structure = models.CharField(max_length=50, blank=False, null=False)
mois = models.CharField(max_length=50, blank=True, null=True)
status = models.CharField(max_length=32, choices=STATUS, default='OK')
fdp = models.FileField(blank=True, null=True, upload_to=upload_path_fdp)
msg = models.TextField(blank=False, null=True)
created_on = models.DateTimeField(auto_now_add=True, null=True, blank=True) |
py | b40e31cab502fa83e6e777a686e822630d915627 | from flask import Flask, render_template, url_for, flash, redirect, abort,request
from . import main
from .forms import RegistrationForm, LoginForm,UpdateProfile ,PostForm
from flask_login import login_required
from ..models import User
from .. import db,photos
posts = [
{
'author': 'dancan dante',
'title': 'funny blog',
'content': 'In France, a chemist named Pilatre de Rozier tested the flammability of hydrogen by gulping a mouthful and blowing across an open flame, proving at a stroke that hydrogen is indeed explosively combustible and that eyebrows are not necessarily a permanent feature of one’s face.',
'date_posted': 'april 22, 2019'
},
{
'author': 'mikel karije',
'title': 'Business blog',
'content': ' Woody Allen calls the planning portion of writing the “pace the floor” part. If you are an analytical thinker, which many business writers are, this is natural for you, but accept that sometimes it makes your brain hurt as your mind has to figure out all the interconnections. ',
'date_posted': 'april 22, 2019'
},
{
'author': 'john dumelo',
'title': 'Health blog',
'content': 'You can focus your blog on healthy recipes, diets, morning routines, at-home workouts, gym workouts, healthcare tips, and advice – you name it. Lots of opportunities for massive traffic and income here.',
'date_posted': 'april 22, 2019'
},
{
'author': 'PRofessor',
'title': 'Inspirational blog',
'content': 'spreading happiness, simplicity, and clarity in everyone’s lives. Inspirational quotes and articles provided by guest authors and editors enlighten people that are interested in topics related to inner clarity and self-growth.',
'date_posted': 'april 22, 2019'
}
]
@main.route('/')
@login_required
def index():
return render_template('index.html', posts=posts)
@main.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
return render_template('login.html', title='LOGIN',form=form)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
@main.route('/post/new',methods = ['GET','POST'])
@login_required
def new_post():
form = PostForm()
if form.validate_on_submit():
flash('The Post Has been Updated', 'successful')
return redirect(url_for('main.index'))
return render_template('new_post.html', title='New Post', form=form) |