text (string, 4–1.02M chars) | meta (dict)
---|---
"""
WSGI config for inonemonth project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from os.path import abspath, dirname
from sys import path
SITE_ROOT = dirname(dirname(abspath(__file__)))
path.append(SITE_ROOT)
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "jajaja.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "inonemonth.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| {
"content_hash": "46e285d2f45eed7b3ff2a30e546a34d0",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 81,
"avg_line_length": 42.37837837837838,
"alnum_prop": 0.7914540816326531,
"repo_name": "robrechtdr/inonemonth",
"id": "a7ac28b6c58486ace2e0de75ce2a09cfa15dc734",
"size": "1568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inonemonth/inonemonth/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6087"
},
{
"name": "Gherkin",
"bytes": "437"
},
{
"name": "HTML",
"bytes": "40190"
},
{
"name": "JavaScript",
"bytes": "14208"
},
{
"name": "Python",
"bytes": "113451"
},
{
"name": "Shell",
"bytes": "3208"
}
],
"symlink_target": ""
} |
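The wsgi.py above points out that the Django application can be wrapped with WSGI middleware before being exposed as ``application``. A minimal sketch of such a wrapper follows; the class and header names are made up for illustration, are not part of the repository, and assume DJANGO_SETTINGS_MODULE is set as in the module above.

# Hypothetical WSGI middleware wrapping the Django application shown above;
# it adds one response header and otherwise delegates unchanged.
from django.core.wsgi import get_wsgi_application

class HeaderMiddleware(object):
    def __init__(self, app, header=("X-Example", "1")):
        self.app = app
        self.header = header

    def __call__(self, environ, start_response):
        def custom_start_response(status, headers, exc_info=None):
            return start_response(status, headers + [self.header], exc_info)
        return self.app(environ, custom_start_response)

application = HeaderMiddleware(get_wsgi_application())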
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class MACAddrList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
MACAddrList - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'devices': 'list[MACAddrInfo]'
}
self.attribute_map = {
'devices': 'devices'
}
self._devices = None
@property
def devices(self):
"""
Gets the devices of this MACAddrList.
:return: The devices of this MACAddrList.
:rtype: list[MACAddrInfo]
"""
return self._devices
@devices.setter
def devices(self, devices):
"""
Sets the devices of this MACAddrList.
:param devices: The devices of this MACAddrList.
:type: list[MACAddrInfo]
"""
self._devices = devices
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| {
"content_hash": "c47117119ee363732c6476f87763450c",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 77,
"avg_line_length": 26.901785714285715,
"alnum_prop": 0.5585794888815134,
"repo_name": "realms-team/solmanager",
"id": "f782288c762ba9c091b1c4fa5155f2c9b4de8f63",
"size": "3030",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "libs/smartmeshsdk-REL-1.3.0.1/libs/VManagerSDK/vmanager/models/mac_addr_list.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3408"
},
{
"name": "CSS",
"bytes": "1148"
},
{
"name": "HTML",
"bytes": "1568"
},
{
"name": "JavaScript",
"bytes": "1430296"
},
{
"name": "Makefile",
"bytes": "8195"
},
{
"name": "Python",
"bytes": "3428922"
},
{
"name": "Smarty",
"bytes": "5800"
}
],
"symlink_target": ""
} |
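The generated model above exposes its data through ``devices`` plus the ``to_dict``/``to_str`` helpers. A short usage sketch follows; in real VManager responses the list would hold ``MACAddrInfo`` instances, which are omitted here to keep the example self-contained.

# Usage sketch for the generated model above (real responses populate the list
# with MACAddrInfo objects produced by the same code generator).
mac_list = MACAddrList()
mac_list.devices = []                  # declared type: list[MACAddrInfo]
print(mac_list.to_dict())              # {'devices': []}
print(mac_list.to_str())               # pretty-printed via pformat
print(mac_list == MACAddrList())       # False: the fresh instance still has devices=None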
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
auth_token = "your_auth_token"
workspace_sid = "WSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
client = Client(account_sid, auth_token)
workers = client.taskrouter.workspaces(workspace_sid).workers.list()
for worker in workers:
print(worker.friendly_name)
| {
"content_hash": "2b96381b06063670c17f59ca65e7c0f1",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 68,
"avg_line_length": 30.615384615384617,
"alnum_prop": 0.7939698492462312,
"repo_name": "teoreteetik/api-snippets",
"id": "a9c0cfd166e38d46d09b9a80b60ddc69b903720c",
"size": "471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest/taskrouter/workers/list/get/example-1/example-1.6.x.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "643369"
},
{
"name": "HTML",
"bytes": "335"
},
{
"name": "Java",
"bytes": "943336"
},
{
"name": "JavaScript",
"bytes": "539577"
},
{
"name": "M",
"bytes": "117"
},
{
"name": "Mathematica",
"bytes": "93"
},
{
"name": "Objective-C",
"bytes": "46198"
},
{
"name": "PHP",
"bytes": "538312"
},
{
"name": "Python",
"bytes": "467248"
},
{
"name": "Ruby",
"bytes": "470316"
},
{
"name": "Shell",
"bytes": "1564"
},
{
"name": "Swift",
"bytes": "36563"
}
],
"symlink_target": ""
} |
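The Twilio snippet above lists every worker in a workspace. The sketch below reuses the same placeholder credentials, prints each worker's sid as well, and fetches a single worker by sid; the worker sid shown is a placeholder, and attributes beyond ``sid``/``friendly_name`` should be checked against the 6.x library before relying on them.

# Slightly expanded sketch; account_sid, auth_token and workspace_sid are the
# same placeholders defined above.
from twilio.rest import Client

client = Client(account_sid, auth_token)
workspace = client.taskrouter.workspaces(workspace_sid)

for worker in workspace.workers.list():
    print(worker.sid, worker.friendly_name)

# Fetch one worker directly once its sid is known (placeholder sid here).
worker = workspace.workers("WKXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").fetch()
print(worker.friendly_name)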
import os
import glob
import hashlib
import mimetypes
from flask import Flask, render_template, request, redirect
from flask import url_for, send_from_directory, abort
from werkzeug.utils import secure_filename
# Initialize the Flask application
app = Flask(__name__)
# This is the path to the upload directory
app.config['DATA_FOLDER'] = 'data/'
# This is the path to the processing directory
app.config['PROCESS_FOLDER'] = 'process/'
# These are the extensions that we accept for upload (listed without dots,
# to match the check in allowed_file below)
file_exts = ['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif', 'shard']
app.config['ALLOWED_EXTENSIONS'] = set(file_exts)
def setup():
"""Setup the proper data store directories."""
if not os.path.exists(app.config['DATA_FOLDER']):
os.makedirs(app.config['DATA_FOLDER'])
if not os.path.exists(app.config['PROCESS_FOLDER']):
os.makedirs(app.config['PROCESS_FOLDER'])
def allowed_file(filename):
"""For a given file, return whether it's an allowed type or not."""
return '.' in filename and \
filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']
def files_in_cache():
"""Returns a list of files in the hashserv cache."""
cache_files = glob.glob(app.config['DATA_FOLDER'] + "/*")
new_cache_files = []
    for item in cache_files:
        # glob returns the full path, but we only want the filename
        # because we already know where it is stored
        new_cache_files.append(os.path.basename(item))
return new_cache_files
def get_hash(filepath):
"""Get the sha256 hash of the passed file."""
hasher = hashlib.sha256()
with open(filepath, 'rb') as afile:
buf = afile.read()
hasher.update(buf)
return hasher.hexdigest()
# This route will show a form to perform an AJAX request
# jQuery is loaded to execute the request and update the
# value of the operation.
@app.route('/')
def index(files=None, data_folder=None):
return render_template('index.html', files=files_in_cache())
# Route that will process the file upload
@app.route('/api/upload', methods=['POST'])
def upload():
# Get the name of the uploaded file
file = request.files['file']
# Check if the file is one of the allowed types/extensions
if file and allowed_file(file.filename):
# Make the filename safe, remove unsupported chars
filename = secure_filename(file.filename)
try:
            # Move the file from the temporary upload location into the processing folder
process_filepath = os.path.join(app.config['PROCESS_FOLDER'],
filename)
file.save(process_filepath)
# Find the hash of the data
file_hash = get_hash(process_filepath)
hash_filepath = os.path.join(app.config['DATA_FOLDER'], file_hash)
# Copy the file from processing to data
os.rename(process_filepath, hash_filepath)
# Returns the file hash
return redirect(url_for('index'))
except FileExistsError:
return "Duplicate file."
else:
return "Invalid file."
# This route expects a parameter containing the hash of a stored file. It
# locates that file in the data directory and returns it to the browser, so if
# the user uploads an image, that image can be shown after the upload.
@app.route('/api/download/<filehash>')
def download_file(filehash):
return send_from_directory(app.config['DATA_FOLDER'], filehash,
as_attachment=True)
@app.route('/api/serve/<filehash>/<extension>')
def serve_file(filehash, extension):
    # find the mimetype from the passed extension
try:
mimetypes.init()
mapped_mimetype = mimetypes.types_map["." + extension]
except KeyError:
return "415 Unsupported Media Type."
return send_from_directory(app.config['DATA_FOLDER'], filehash,
mimetype=mapped_mimetype)
if __name__ == '__main__':
# Make sure process and data directories are created
setup()
# Run the Flask app
app.run(
host="0.0.0.0",
port=int("5000"),
debug=True
) | {
"content_hash": "de6f502e41ebe3d06585ecf8346c9406",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 69,
"avg_line_length": 30.352,
"alnum_prop": 0.7150764364786505,
"repo_name": "iswt/hashserv",
"id": "158071b73e61c029ccf5f4a93158e7166a5c8e1a",
"size": "3794",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1103"
},
{
"name": "Python",
"bytes": "4198"
}
],
"symlink_target": ""
} |
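app.py above exposes /api/upload, /api/download/<filehash> and /api/serve/<filehash>/<extension>. A client-side sketch using the ``requests`` package follows; it assumes the Flask app is running locally on port 5000, which is not something the file itself guarantees.

# Client-side sketch for the hashserv API above; assumes the server is running
# on localhost:5000 and that `requests` is installed.
import hashlib
import requests

data = b"hello hashserv"
file_hash = hashlib.sha256(data).hexdigest()   # same digest the server computes

# Upload: the form field must be named 'file' to match request.files['file'].
requests.post("http://localhost:5000/api/upload",
              files={"file": ("hello.txt", data)})

# Download the stored blob back by its content hash.
resp = requests.get("http://localhost:5000/api/download/" + file_hash)
assert resp.content == data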
from __future__ import absolute_import
import collections
import base64
import os
import re
import sys
import subprocess
import time
import tempfile
import warnings
import logging
import xml.dom.minidom
from uiautomator import Device as UiaDevice
from uiautomator import AutomatorDeviceObject
from PIL import Image
from atx import consts
from atx import errors
from atx import patch
from atx import base
from atx import imutils
from atx import strutils
from atx.device import Bounds
from atx import logutils
from atx.device.mixin import DeviceMixin, hook_wrap
from atx import adbkit
_DISPLAY_RE = re.compile(
r'.*DisplayViewport{valid=true, .*orientation=(?P<orientation>\d+), .*deviceWidth=(?P<width>\d+), deviceHeight=(?P<height>\d+).*')
_PROP_PATTERN = re.compile(
r'\[(?P<key>.*?)\]:\s*\[(?P<value>.*)\]')
_INPUT_METHOD_RE = re.compile(
r'mCurMethodId=([-_./\w]+)')
UINode = collections.namedtuple('UINode', [
'xml',
'bounds',
'selected', 'checkable', 'clickable', 'scrollable', 'focusable', 'enabled', 'focused', 'long_clickable',
'password',
'class_name',
'index', 'resource_id',
'text', 'content_desc',
'package'])
log = logutils.getLogger(__name__)
def getenv(name, default_value=None, type=str):
value = os.getenv(name)
return type(value) if value else default_value
class AndroidDevice(DeviceMixin, UiaDevice):
def __init__(self, serialno=None, **kwargs):
"""Initial AndroidDevice
Args:
serialno: string specify which device
Returns:
AndroidDevice object
Raises:
EnvironmentError
"""
self.__display = None
serialno = serialno or getenv('ATX_ADB_SERIALNO', None)
self._host = kwargs.get('host', getenv('ATX_ADB_HOST', '127.0.0.1'))
self._port = kwargs.get('port', getenv('ATX_ADB_PORT', 5037, type=int))
self._adb_client = adbkit.Client(self._host, self._port)
self._adb_device = self._adb_client.device(serialno)
kwargs['adb_server_host'] = kwargs.pop('host', self._host)
kwargs['adb_server_port'] = kwargs.pop('port', self._port)
UiaDevice.__init__(self, serialno, **kwargs)
DeviceMixin.__init__(self)
self._randid = base.id_generator(5)
self._uiauto = super(AndroidDevice, self)
self.screen_rotation = None
self.screenshot_method = consts.SCREENSHOT_METHOD_AUTO
self.last_screenshot = None
@property
def serial(self):
""" Android Device Serial Number """
return self._adb_device.serial
@property
def adb_server_host(self):
return self._host
@property
def adb_server_port(self):
return self._port
@property
def adb_device(self):
return self._adb_device
@property
def wlan_ip(self):
""" Wlan IP """
return self.adb_shell(['getprop', 'dhcp.wlan0.ipaddress']).strip()
def forward(self, device_port, local_port=None):
"""Forward device port to local
Args:
device_port: port inside device
            local_port: port on the PC; if None, a free port is picked at random.
Returns:
tuple, (host, local_port)
"""
port = self._adb_device.forward(device_port, local_port)
return (self._host, port)
@property
def current_package_name(self):
return self.info['currentPackageName']
def is_app_alive(self, package_name):
""" Deprecated: use current_package_name instaed.
Check if app in running in foreground """
return self.info['currentPackageName'] == package_name
def sleep(self, secs=None):
"""Depreciated. use delay instead."""
if secs is None:
self._uiauto.sleep()
else:
self.delay(secs)
@property
def display(self):
"""Virtual keyborad may get small d.info['displayHeight']
"""
if self.__display:
return self.__display
w, h = (0, 0)
for line in self.adb_shell('dumpsys display').splitlines():
m = _DISPLAY_RE.search(line, 0)
if not m:
continue
w = int(m.group('width'))
h = int(m.group('height'))
# o = int(m.group('orientation'))
w, h = min(w, h), max(w, h)
self.__display = collections.namedtuple('Display', ['width', 'height'])(w, h)
return self.__display
w, h = self.info['displayWidth'], self.info['displayHeight']
w, h = min(w, h), max(w, h)
self.__display = collections.namedtuple('Display', ['width', 'height'])(w, h)
return self.__display
@property
def rotation(self):
"""
        Rotation of the phone
0: normal
1: home key on the right
2: home key on the top
3: home key on the left
"""
if self.screen_rotation in range(4):
return self.screen_rotation
return self.adb_device.rotation() or self.info['displayRotation']
@rotation.setter
def rotation(self, r):
if not isinstance(r, int):
raise TypeError("r must be int")
self.screen_rotation = r
def _minicap_params(self):
"""
        Takes about 0.1s.
        uiautomator d.info is not working well with devices that have a virtual menu,
        so the minicap parameters are built from this class's display and rotation instead.
"""
rotation = self.rotation
        # rotation not working on Samsung 9502
return '{x}x{y}@{x}x{y}/{r}'.format(
x=self.display.width,
y=self.display.height,
r=rotation*90)
def _screenshot_minicap(self):
phone_tmp_file = '/data/local/tmp/_atx_screen-{}.jpg'.format(self._randid)
local_tmp_file = tempfile.mktemp(prefix='atx-tmp-', suffix='.jpg')
command = 'LD_LIBRARY_PATH=/data/local/tmp /data/local/tmp/minicap -P {} -s > {}'.format(
self._minicap_params(), phone_tmp_file)
try:
self.adb_shell(command)
self.adb_cmd(['pull', phone_tmp_file, local_tmp_file])
image = imutils.open_as_pillow(local_tmp_file)
            # Fix cases where the image was not rotated correctly.
(width, height) = image.size
if self.screen_rotation in [1, 3] and width < height:
image = image.rotate(90, Image.BILINEAR, expand=True)
return image
except IOError:
raise IOError("Screenshot use minicap failed.")
finally:
if os.path.exists(local_tmp_file):
os.unlink(local_tmp_file)
self.adb_shell(['rm', phone_tmp_file])
def _screenshot_uiauto(self):
tmp_file = tempfile.mktemp(prefix='atx-tmp-', suffix='.jpg')
self._uiauto.screenshot(tmp_file)
try:
return imutils.open_as_pillow(tmp_file)
except IOError:
raise IOError("Screenshot use uiautomator failed.")
finally:
base.remove_force(tmp_file)
@hook_wrap(consts.EVENT_CLICK)
def click(self, x, y):
"""
Touch specify position
Args:
x, y: int
Returns:
None
"""
return self._uiauto.click(x, y)
@hook_wrap(consts.EVENT_SCREENSHOT)
def screenshot(self, filename=None):
"""
Take screen snapshot
Args:
filename: filename where save to, optional
Returns:
PIL.Image object
Raises:
TypeError, IOError
"""
screen = None
if self.screenshot_method == consts.SCREENSHOT_METHOD_UIAUTOMATOR:
screen = self._screenshot_uiauto()
elif self.screenshot_method == consts.SCREENSHOT_METHOD_MINICAP:
screen = self._screenshot_minicap()
elif self.screenshot_method == consts.SCREENSHOT_METHOD_AUTO:
try:
screen = self._screenshot_minicap()
self.screenshot_method = consts.SCREENSHOT_METHOD_MINICAP
except IOError:
screen = self._screenshot_uiauto()
self.screenshot_method = consts.SCREENSHOT_METHOD_UIAUTOMATOR
else:
raise TypeError('Invalid screenshot_method')
if filename:
save_dir = os.path.dirname(filename) or '.'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
screen.save(filename)
self.last_screenshot = screen
return screen
def raw_cmd(self, *args, **kwargs):
return self.adb_device.raw_cmd(*args, **kwargs)
def adb_cmd(self, command):
'''
Run adb command, for example: adb(['pull', '/data/local/tmp/a.png'])
Args:
command: string or list of string
Returns:
command output
'''
if isinstance(command, list) or isinstance(command, tuple):
return self.adb_device.run_cmd(*list(command))
return self.adb_device.run_cmd(command)
def adb_shell(self, command):
'''
Run adb shell command
Args:
command: string or list of string
Returns:
command output
'''
if isinstance(command, list) or isinstance(command, tuple):
return self.adb_cmd(['shell'] + list(command))
else:
return self.adb_cmd(['shell'] + [command])
@property
def properties(self):
'''
Android Properties, extracted from `adb shell getprop`
Returns:
dict of props, for
example:
{'ro.bluetooth.dun': 'true'}
'''
props = {}
for line in self.adb_shell(['getprop']).splitlines():
m = _PROP_PATTERN.match(line)
if m:
props[m.group('key')] = m.group('value')
return props
def start_app(self, package_name, activity=None):
'''
Start application
Args:
package_name: string like com.example.app1
Returns time used (unit second), if activity is not None
'''
_pattern = re.compile(r'TotalTime: (\d+)')
if activity is None:
self.adb_shell(['monkey', '-p', package_name, '-c', 'android.intent.category.LAUNCHER', '1'])
else:
output = self.adb_shell(['am', 'start', '-W', '-n', '%s/%s' % (package_name, activity)])
m = _pattern.search(output)
if m:
return int(m.group(1))/1000.0
def stop_app(self, package_name, clear=False):
'''
Stop application
Args:
package_name: string like com.example.app1
clear: bool, remove user data
Returns:
None
'''
if clear:
self.adb_shell(['pm', 'clear', package_name])
else:
self.adb_shell(['am', 'force-stop', package_name])
return self
def takeSnapshot(self, filename):
'''
Deprecated, use screenshot instead.
'''
warnings.warn("deprecated, use snapshot instead", DeprecationWarning)
return self.screenshot(filename)
def _parse_xml_node(self, node):
# ['bounds', 'checkable', 'class', 'text', 'resource_id', 'package']
__alias = {
'class': 'class_name',
'resource-id': 'resource_id',
'content-desc': 'content_desc',
'long-clickable': 'long_clickable',
}
def parse_bounds(text):
m = re.match(r'\[(\d+),(\d+)\]\[(\d+),(\d+)\]', text)
if m is None:
return None
return Bounds(*map(int, m.groups()))
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
def convstr(v):
return v.encode('utf-8')
parsers = {
'bounds': parse_bounds,
'text': convstr,
'class_name': convstr,
'resource_id': convstr,
'package': convstr,
'checkable': str2bool,
'scrollable': str2bool,
'focused': str2bool,
'clickable': str2bool,
'enabled': str2bool,
'selected': str2bool,
'long_clickable': str2bool,
'focusable': str2bool,
'password': str2bool,
'index': int,
'content_desc': convstr,
}
ks = {}
for key, value in node.attributes.items():
key = __alias.get(key, key)
f = parsers.get(key)
if value is None:
ks[key] = None
elif f:
ks[key] = f(value)
for key in parsers.keys():
ks[key] = ks.get(key)
ks['xml'] = node
return UINode(**ks)
def dump_nodes(self):
"""Dump current screen UI to list
Returns:
List of UINode object, For
example:
[UINode(
bounds=Bounds(left=0, top=0, right=480, bottom=168),
checkable=False,
class_name='android.view.View',
text='',
resource_id='',
package='com.sonyericsson.advancedwidget.clock')]
"""
xmldata = self._uiauto.dump()
dom = xml.dom.minidom.parseString(xmldata.encode('utf-8'))
root = dom.documentElement
nodes = root.getElementsByTagName('node')
ui_nodes = []
for node in nodes:
ui_nodes.append(self._parse_xml_node(node))
return ui_nodes
def _escape_text(self, s, utf7=False):
s = s.replace(' ', '%s')
if utf7:
s = s.encode('utf-7')
return s
def keyevent(self, keycode):
"""call adb shell input keyevent ${keycode}
Args:
- keycode(string): for example, KEYCODE_ENTER
keycode need reference:
http://developer.android.com/reference/android/view/KeyEvent.html
"""
self.adb_shell(['input', 'keyevent', keycode])
def type(self, text, enter=False):
"""Input some text, this method has been tested not very stable on some device.
"Hi world" maybe spell into "H iworld"
Args:
- text: string (text to input), better to be unicode
- enter(bool): input enter at last
The android source code show that
space need to change to %s
insteresting thing is that if want to input %s, it is really unconvinent.
android source code can be found here.
https://android.googlesource.com/platform/frameworks/base/+/android-4.4.2_r1/cmds/input/src/com/android/commands/input/Input.java#159
"""
is_utf7ime = (self.current_ime() == 'android.unicode.ime/.Utf7ImeService')
if is_utf7ime:
estext = base64.b64encode(text.encode('utf-7'))
self.adb_shell(['am', 'broadcast', '-a', 'ADB_INPUT_TEXT', '--es', 'format', 'base64', '--es', 'msg', estext])
else:
first = True
for s in text.split('%s'):
if first:
first = False
else:
self.adb_shell(['input', 'text', '%'])
s = 's' + s
if s == '':
continue
estext = self._escape_text(s)
self.adb_shell(['input', 'text', estext])
if enter:
self.keyevent('KEYCODE_ENTER')
def clear_text(self, count=100):
"""Clear text
Args:
- count (int): send KEY_DEL count
"""
is_utf7ime = (self.current_ime() == 'android.unicode.ime/.Utf7ImeService')
if not is_utf7ime:
raise RuntimeError("Input method must be 'android.unicode.ime'")
self.keyevent('KEYCODE_MOVE_END')
self.adb_shell(['am', 'broadcast', '-a', 'ADB_INPUT_CODE', '--ei', 'code', '67', '--ei', 'repeat', str(count)])
def input_methods(self):
"""
Get all input methods
Return example: ['com.sohu.inputmethod.sogou/.SogouIME', 'android.unicode.ime/.Utf7ImeService']
"""
imes = []
for line in self.adb_shell(['ime', 'list', '-s', '-a']).splitlines():
line = line.strip()
if re.match('^.+/.+$', line):
imes.append(line)
return imes
def current_ime(self):
''' Get current input method '''
dumpout = self.adb_shell(['dumpsys', 'input_method'])
m = _INPUT_METHOD_RE.search(dumpout)
if m:
return m.group(1)
        # Maybe there is no need to raise an error here
        # raise RuntimeError("Cannot detect current input method")
| {
"content_hash": "06cfe73e737c7fe2db0f4334efd1a3e6",
"timestamp": "",
"source": "github",
"line_count": 530,
"max_line_length": 141,
"avg_line_length": 31.460377358490565,
"alnum_prop": 0.5491183879093199,
"repo_name": "Andy-hpliu/AirtestX",
"id": "eabc2226591b7328b07a4bcbdc7e43975b685011",
"size": "16744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atx/device/android.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "230"
},
{
"name": "CSS",
"bytes": "34684"
},
{
"name": "Go",
"bytes": "13043"
},
{
"name": "HTML",
"bytes": "28019"
},
{
"name": "JavaScript",
"bytes": "300119"
},
{
"name": "Makefile",
"bytes": "348"
},
{
"name": "Protocol Buffer",
"bytes": "5495"
},
{
"name": "Python",
"bytes": "394333"
},
{
"name": "Shell",
"bytes": "4162"
}
],
"symlink_target": ""
} |
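android.py above combines uiautomator and adb into a single AndroidDevice class. A short usage sketch follows, assuming an adb-reachable device and an importable ``atx`` package; the coordinates and text are illustrative.

# Usage sketch for the AndroidDevice class above; requires a device visible to
# adb. Passing serialno=None picks the only connected device.
from atx.device.android import AndroidDevice

d = AndroidDevice(serialno=None)
print(d.serial, d.display, d.rotation)     # basic device info
img = d.screenshot('screen.png')           # PIL.Image, also written to disk
d.click(100, 200)                          # tap a coordinate
d.type('hello world', enter=True)          # text input via adb / UTF-7 IME
print(d.current_package_name)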
import sys
# If your extensions are in another directory, add it here.
sys.path.insert(0, '../..')
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax']
# MathJax file, which is free to use. See http://www.mathjax.org/docs/2.0/start.html
# mathjax_path = 'http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS_HTML-full'
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix of source filenames.
source_suffix = '.txt'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'mpmath'
copyright = '2007-2018, Fredrik Johansson and mpmath developers'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
import mpmath
version = mpmath.__version__
# The full version, including alpha/beta/rc tags.
release = mpmath.__version__
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The "theme" that the HTML output should use.
html_theme = 'classic'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
#html_index = ''
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'mpmathdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [(master_doc, 'main.tex', 'mpmath documentation',
r'Fredrik Johansson \and mpmath contributors', 'manual')]
# Additional stuff for the LaTeX preamble.
latex_preamble = r'\usepackage{amsfonts}'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
default_role = 'math'
pngmath_dvipng_args = ['-gamma 1.5', '-D 110']
| {
"content_hash": "333129ff52e6efd8eef91baa0f197dd5",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 92,
"avg_line_length": 30.824,
"alnum_prop": 0.709576953023618,
"repo_name": "JensGrabner/mpmath",
"id": "a5a45faa14c2662e0bcef67750a3dc01a76ccbe3",
"size": "4351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1738910"
},
{
"name": "Shell",
"bytes": "225"
}
],
"symlink_target": ""
} |
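The conf.py above is normally consumed by sphinx-build. A sketch of driving the same build programmatically is below; the directory names follow the doc/source layout implied by the file's path and are otherwise an assumption.

# Programmatic equivalent of `sphinx-build -b html source build/html`, intended
# to be run from the doc/ directory of the repository.
from sphinx.application import Sphinx

app = Sphinx(srcdir='source', confdir='source',
             outdir='build/html', doctreedir='build/doctrees',
             buildername='html')
app.build()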
from rest_framework import serializers, pagination
from .models import Stock, Share, Application, Log
from accounts.serializers import AccountField
class StockSerializer(serializers.HyperlinkedModelSerializer):
publisher = AccountField(read_only = True, exclude = ['members'])
class Meta:
model = Stock
exclude = ('publisher_type', 'publisher_object_id')
class LogSerializer(serializers.ModelSerializer):
class Meta:
model = Log
fields = ('created_time', 'price')
class ShareSerializer(serializers.ModelSerializer):
stock = serializers.Field(source = 'stock.display_name')
class Meta:
model = Share
exclude = ('owner_type', 'owner_object_id', 'owner')
class ApplicationSerializer(serializers.ModelSerializer):
stock = StockSerializer(fields = ['id', 'display_name'])
class Meta:
model = Application
exclude = ('applicant_type', 'applicant_object_id', 'applicant', 'command',)
| {
"content_hash": "0c89320901fb8a870dc117cb696116b8",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 78,
"avg_line_length": 28.87878787878788,
"alnum_prop": 0.7124868835257083,
"repo_name": "hfercc/mese2014",
"id": "886a926b93f3c21a6bdd31000418bae4df412be0",
"size": "953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "securities/stocks/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "103122"
},
{
"name": "JavaScript",
"bytes": "1054910"
},
{
"name": "Python",
"bytes": "1121791"
},
{
"name": "Shell",
"bytes": "2381"
}
],
"symlink_target": ""
} |
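StockSerializer above is instantiated with ``fields=['id', 'display_name']`` and AccountField with ``exclude=['members']``, keyword arguments a stock ModelSerializer does not accept, so the project evidently relies on a dynamic-fields pattern defined elsewhere. A common version of that recipe is sketched below; the project's actual implementation may differ.

# A common dynamic-fields recipe that makes `fields=` / `exclude=` constructor
# kwargs work on a serializer; shown as a mixin to combine with the classes above.
class DynamicFieldsMixin(object):
    def __init__(self, *args, **kwargs):
        fields = kwargs.pop('fields', None)
        exclude = kwargs.pop('exclude', None)
        super(DynamicFieldsMixin, self).__init__(*args, **kwargs)
        if fields is not None:
            for name in set(self.fields) - set(fields):
                self.fields.pop(name)
        if exclude is not None:
            for name in exclude:
                self.fields.pop(name, None)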
"""Categorical tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.preprocessing import categorical
class CategoricalTest(tf.test.TestCase):
"""Categorical tests."""
def testSingleCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=1)
x = cat_processor.fit_transform([["0"], [1], [float("nan")], ["C"], ["C"],
[1], ["0"], [np.nan], [3]])
self.assertAllEqual(list(x), [[2], [1], [0], [3], [3], [1], [2], [0], [0]])
def testSingleCategoricalProcessorPandasSingleDF(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
cat_processor = categorical.CategoricalProcessor()
data = pd.DataFrame({"Gender": ["Male", "Female", "Male"]})
x = list(cat_processor.fit_transform(data))
self.assertAllEqual(list(x), [[1], [2], [1]])
def testMultiCategoricalProcessor(self):
cat_processor = categorical.CategoricalProcessor(min_frequency=0,
share=False)
x = cat_processor.fit_transform([["0", "Male"], [1, "Female"], ["3", "Male"]
])
self.assertAllEqual(list(x), [[1, 1], [2, 2], [3, 1]])
if __name__ == "__main__":
tf.test.main()
| {
"content_hash": "546ee80abc80c6faa11d86706b3934fa",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 80,
"avg_line_length": 37.46341463414634,
"alnum_prop": 0.6100260416666666,
"repo_name": "EvenStrangest/tensorflow",
"id": "ea0f9c584e5e8234751330165a068fee4013c87c",
"size": "2279",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/learn/python/learn/preprocessing/tests/categorical_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "156263"
},
{
"name": "C++",
"bytes": "9372687"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "784316"
},
{
"name": "Java",
"bytes": "39229"
},
{
"name": "JavaScript",
"bytes": "10875"
},
{
"name": "Jupyter Notebook",
"bytes": "1533241"
},
{
"name": "Makefile",
"bytes": "11364"
},
{
"name": "Objective-C",
"bytes": "5332"
},
{
"name": "Objective-C++",
"bytes": "45585"
},
{
"name": "Protocol Buffer",
"bytes": "112557"
},
{
"name": "Python",
"bytes": "6949434"
},
{
"name": "Shell",
"bytes": "196466"
},
{
"name": "TypeScript",
"bytes": "411503"
}
],
"symlink_target": ""
} |
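The tests above show CategoricalProcessor mapping category values to small integer ids, with rare or missing values collapsing to id 0 when ``min_frequency`` is set. A standalone sketch mirroring the first test follows; it assumes the old tensorflow.contrib.learn preprocessing module is still importable, which is only true for the TensorFlow versions this test targeted.

# Sketch mirroring testSingleCategoricalProcessor above; only meaningful on the
# old TensorFlow releases that still ship tensorflow.contrib.learn.
from tensorflow.contrib.learn.python.learn.preprocessing import categorical

proc = categorical.CategoricalProcessor(min_frequency=1)
ids = list(proc.fit_transform([["0"], [1], ["C"], ["C"], [1], ["0"], [3]]))
print(ids)   # values seen often enough get their own id; the rare "3" maps to [0]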
import sqlalchemy.exceptions as sa_exc
from sqlalchemy import sql, util
from sqlalchemy.sql import expression, util as sql_util, operators
from sqlalchemy.orm.interfaces import MapperExtension, EXT_CONTINUE,\
PropComparator, MapperProperty,\
AttributeExtension
from sqlalchemy.orm import attributes, exc
mapperlib = util.importlater("sqlalchemy.orm", "mapperlib")
all_cascades = frozenset(("delete", "delete-orphan", "all", "merge",
"expunge", "save-update", "refresh-expire",
"none"))
_INSTRUMENTOR = ('mapper', 'instrumentor')
class CascadeOptions(object):
"""Keeps track of the options sent to relationship().cascade"""
def __init__(self, arg=""):
if not arg:
values = set()
else:
values = set(c.strip() for c in arg.split(','))
self.delete_orphan = "delete-orphan" in values
self.delete = "delete" in values or "all" in values
self.save_update = "save-update" in values or "all" in values
self.merge = "merge" in values or "all" in values
self.expunge = "expunge" in values or "all" in values
self.refresh_expire = "refresh-expire" in values or "all" in values
if self.delete_orphan and not self.delete:
util.warn("The 'delete-orphan' cascade option requires "
"'delete'. This will raise an error in 0.6.")
for x in values:
if x not in all_cascades:
raise sa_exc.ArgumentError("Invalid cascade option '%s'" % x)
def __contains__(self, item):
return getattr(self, item.replace("-", "_"), False)
def __repr__(self):
return "CascadeOptions(%s)" % repr(",".join(
[x for x in ['delete', 'save_update', 'merge', 'expunge',
'delete_orphan', 'refresh-expire']
if getattr(self, x, False) is True]))
class Validator(AttributeExtension):
"""Runs a validation method on an attribute value to be set or appended.
The Validator class is used by the :func:`~sqlalchemy.orm.validates`
decorator, and direct access is usually not needed.
"""
def __init__(self, key, validator):
"""Construct a new Validator.
key - name of the attribute to be validated;
will be passed as the second argument to
the validation method (the first is the object instance itself).
validator - an function or instance method which accepts
three arguments; an instance (usually just 'self' for a method),
the key name of the attribute, and the value. The function should
return the same value given, unless it wishes to modify it.
"""
self.key = key
self.validator = validator
def append(self, state, value, initiator):
return self.validator(state.obj(), self.key, value)
def set(self, state, value, oldvalue, initiator):
return self.validator(state.obj(), self.key, value)
def polymorphic_union(table_map, typecolname, aliasname='p_union'):
"""Create a ``UNION`` statement used by a polymorphic mapper.
See :ref:`concrete_inheritance` for an example of how
this is used.
"""
colnames = set()
colnamemaps = {}
types = {}
for key in table_map.keys():
table = table_map[key]
        # mysql doesn't like selecting from a select;
# make it an alias of the select
if isinstance(table, sql.Select):
table = table.alias()
table_map[key] = table
m = {}
for c in table.c:
colnames.add(c.key)
m[c.key] = c
types[c.key] = c.type
colnamemaps[table] = m
def col(name, table):
try:
return colnamemaps[table][name]
except KeyError:
return sql.cast(sql.null(), types[name]).label(name)
result = []
for type, table in table_map.iteritems():
if typecolname is not None:
result.append(
sql.select([col(name, table) for name in colnames] +
[sql.literal_column(sql_util._quote_ddl_expr(type)).
label(typecolname)],
from_obj=[table]))
else:
result.append(sql.select([col(name, table) for name in colnames],
from_obj=[table]))
return sql.union_all(*result).alias(aliasname)
def identity_key(*args, **kwargs):
"""Get an identity key.
Valid call signatures:
* ``identity_key(class, ident)``
class
mapped class (must be a positional argument)
ident
primary key, if the key is composite this is a tuple
* ``identity_key(instance=instance)``
instance
object instance (must be given as a keyword arg)
* ``identity_key(class, row=row)``
class
mapped class (must be a positional argument)
row
result proxy row (must be given as a keyword arg)
"""
if args:
if len(args) == 1:
class_ = args[0]
try:
row = kwargs.pop("row")
except KeyError:
ident = kwargs.pop("ident")
elif len(args) == 2:
class_, ident = args
elif len(args) == 3:
class_, ident = args
else:
raise sa_exc.ArgumentError("expected up to three "
"positional arguments, got %s" % len(args))
if kwargs:
raise sa_exc.ArgumentError("unknown keyword arguments: %s"
% ", ".join(kwargs.keys()))
mapper = class_mapper(class_)
if "ident" in locals():
return mapper.identity_key_from_primary_key(ident)
return mapper.identity_key_from_row(row)
instance = kwargs.pop("instance")
if kwargs:
raise sa_exc.ArgumentError("unknown keyword arguments: %s"
% ", ".join(kwargs.keys()))
mapper = object_mapper(instance)
return mapper.identity_key_from_instance(instance)
class ExtensionCarrier(dict):
"""Fronts an ordered collection of MapperExtension objects.
Bundles multiple MapperExtensions into a unified callable unit,
encapsulating ordering, looping and EXT_CONTINUE logic. The
ExtensionCarrier implements the MapperExtension interface, e.g.::
carrier.after_insert(...args...)
The dictionary interface provides containment for implemented
method names mapped to a callable which executes that method
for participating extensions.
"""
interface = set(method for method in dir(MapperExtension)
if not method.startswith('_'))
def __init__(self, extensions=None):
self._extensions = []
for ext in extensions or ():
self.append(ext)
def copy(self):
return ExtensionCarrier(self._extensions)
def push(self, extension):
"""Insert a MapperExtension at the beginning of the collection."""
self._register(extension)
self._extensions.insert(0, extension)
def append(self, extension):
"""Append a MapperExtension at the end of the collection."""
self._register(extension)
self._extensions.append(extension)
def __iter__(self):
"""Iterate over MapperExtensions in the collection."""
return iter(self._extensions)
def _register(self, extension):
"""Register callable fronts for overridden interface methods."""
for method in self.interface.difference(self):
impl = getattr(extension, method, None)
if impl and impl is not getattr(MapperExtension, method):
self[method] = self._create_do(method)
def _create_do(self, method):
"""Return a closure that loops over impls of the named method."""
def _do(*args, **kwargs):
for ext in self._extensions:
ret = getattr(ext, method)(*args, **kwargs)
if ret is not EXT_CONTINUE:
return ret
else:
return EXT_CONTINUE
_do.__name__ = method
return _do
@staticmethod
def _pass(*args, **kwargs):
return EXT_CONTINUE
def __getattr__(self, key):
"""Delegate MapperExtension methods to bundled fronts."""
if key not in self.interface:
raise AttributeError(key)
return self.get(key, self._pass)
class ORMAdapter(sql_util.ColumnAdapter):
"""Extends ColumnAdapter to accept ORM entities.
The selectable is extracted from the given entity,
and the AliasedClass if any is referenced.
"""
def __init__(self, entity, equivalents=None,
chain_to=None, adapt_required=False):
self.mapper, selectable, is_aliased_class = _entity_info(entity)
if is_aliased_class:
self.aliased_class = entity
else:
self.aliased_class = None
sql_util.ColumnAdapter.__init__(self, selectable,
equivalents, chain_to,
adapt_required=adapt_required)
def replace(self, elem):
entity = elem._annotations.get('parentmapper', None)
if not entity or entity.isa(self.mapper):
return sql_util.ColumnAdapter.replace(self, elem)
else:
return None
class AliasedClass(object):
"""Represents an "aliased" form of a mapped class for usage with Query.
The ORM equivalent of a :func:`sqlalchemy.sql.expression.alias`
construct, this object mimics the mapped class using a
__getattr__ scheme and maintains a reference to a
real :class:`~sqlalchemy.sql.expression.Alias` object.
Usage is via the :class:`~sqlalchemy.orm.aliased()` synonym::
# find all pairs of users with the same name
user_alias = aliased(User)
session.query(User, user_alias).\\
join((user_alias, User.id > user_alias.id)).\\
filter(User.name==user_alias.name)
"""
def __init__(self, cls, alias=None, name=None):
self.__mapper = _class_to_mapper(cls)
self.__target = self.__mapper.class_
if alias is None:
alias = self.__mapper._with_polymorphic_selectable.alias()
self.__adapter = sql_util.ClauseAdapter(alias,
equivalents=self.__mapper._equivalent_columns)
self.__alias = alias
# used to assign a name to the RowTuple object
# returned by Query.
self._sa_label_name = name
self.__name__ = 'AliasedClass_' + str(self.__target)
def __getstate__(self):
return {
'mapper':self.__mapper,
'alias':self.__alias,
'name':self._sa_label_name
}
def __setstate__(self, state):
self.__mapper = state['mapper']
self.__target = self.__mapper.class_
alias = state['alias']
self.__adapter = sql_util.ClauseAdapter(alias,
equivalents=self.__mapper._equivalent_columns)
self.__alias = alias
name = state['name']
self._sa_label_name = name
self.__name__ = 'AliasedClass_' + str(self.__target)
def __adapt_element(self, elem):
return self.__adapter.traverse(elem).\
_annotate({
'parententity': self,
'parentmapper':self.__mapper}
)
def __adapt_prop(self, prop):
existing = getattr(self.__target, prop.key)
comparator = existing.comparator.adapted(self.__adapt_element)
queryattr = attributes.QueryableAttribute(prop.key,
impl=existing.impl, parententity=self, comparator=comparator)
setattr(self, prop.key, queryattr)
return queryattr
def __getattr__(self, key):
if self.__mapper.has_property(key):
return self.__adapt_prop(
self.__mapper.get_property(
key, _compile_mappers=False
)
)
for base in self.__target.__mro__:
try:
attr = object.__getattribute__(base, key)
except AttributeError:
continue
else:
break
else:
raise AttributeError(key)
if hasattr(attr, 'func_code'):
is_method = getattr(self.__target, key, None)
if is_method and is_method.im_self is not None:
return util.types.MethodType(attr.im_func, self, self)
else:
return None
elif hasattr(attr, '__get__'):
return attr.__get__(None, self)
else:
return attr
def __repr__(self):
return '<AliasedClass at 0x%x; %s>' % (
id(self), self.__target.__name__)
def _orm_annotate(element, exclude=None):
"""Deep copy the given ClauseElement, annotating each element with the
"_orm_adapt" flag.
Elements within the exclude collection will be cloned but not annotated.
"""
return sql_util._deep_annotate(element, {'_orm_adapt':True}, exclude)
_orm_deannotate = sql_util._deep_deannotate
class _ORMJoin(expression.Join):
"""Extend Join to support ORM constructs as input."""
__visit_name__ = expression.Join.__visit_name__
def __init__(self, left, right, onclause=None,
isouter=False, join_to_left=True):
adapt_from = None
if hasattr(left, '_orm_mappers'):
left_mapper = left._orm_mappers[1]
if join_to_left:
adapt_from = left.right
else:
left_mapper, left, left_is_aliased = _entity_info(left)
if join_to_left and (left_is_aliased or not left_mapper):
adapt_from = left
right_mapper, right, right_is_aliased = _entity_info(right)
if right_is_aliased:
adapt_to = right
else:
adapt_to = None
if left_mapper or right_mapper:
self._orm_mappers = (left_mapper, right_mapper)
if isinstance(onclause, basestring):
prop = left_mapper.get_property(onclause)
elif isinstance(onclause, attributes.QueryableAttribute):
if adapt_from is None:
adapt_from = onclause.__clause_element__()
prop = onclause.property
elif isinstance(onclause, MapperProperty):
prop = onclause
else:
prop = None
if prop:
pj, sj, source, dest, \
secondary, target_adapter = prop._create_joins(
source_selectable=adapt_from,
dest_selectable=adapt_to,
source_polymorphic=True,
dest_polymorphic=True,
of_type=right_mapper)
if sj is not None:
left = sql.join(left, secondary, pj, isouter)
onclause = sj
else:
onclause = pj
self._target_adapter = target_adapter
expression.Join.__init__(self, left, right, onclause, isouter)
def join(self, right, onclause=None, isouter=False, join_to_left=True):
return _ORMJoin(self, right, onclause, isouter, join_to_left)
def outerjoin(self, right, onclause=None, join_to_left=True):
return _ORMJoin(self, right, onclause, True, join_to_left)
def join(left, right, onclause=None, isouter=False, join_to_left=True):
"""Produce an inner join between left and right clauses.
In addition to the interface provided by
:func:`~sqlalchemy.sql.expression.join()`, left and right may be mapped
classes or AliasedClass instances. The onclause may be a
string name of a relationship(), or a class-bound descriptor
representing a relationship.
join_to_left indicates to attempt aliasing the ON clause,
in whatever form it is passed, to the selectable
passed as the left side. If False, the onclause
is used as is.
"""
return _ORMJoin(left, right, onclause, isouter, join_to_left)
def outerjoin(left, right, onclause=None, join_to_left=True):
"""Produce a left outer join between left and right clauses.
In addition to the interface provided by
:func:`~sqlalchemy.sql.expression.outerjoin()`, left and right may be
mapped classes or AliasedClass instances. The onclause may be a string
name of a relationship(), or a class-bound descriptor representing a
relationship.
"""
return _ORMJoin(left, right, onclause, True, join_to_left)
def with_parent(instance, prop):
"""Create filtering criterion that relates this query's primary entity
to the given related instance, using established :func:`.relationship()`
configuration.
The SQL rendered is the same as that rendered when a lazy loader
would fire off from the given parent on that attribute, meaning
that the appropriate state is taken from the parent object in
Python without the need to render joins to the parent table
in the rendered statement.
As of 0.6.4, this method accepts parent instances in all
persistence states, including transient, persistent, and detached.
Only the requisite primary key/foreign key attributes need to
be populated. Previous versions didn't work with transient
instances.
:param instance:
An instance which has some :func:`.relationship`.
:param property:
String property name, or class-bound attribute, which indicates
what relationship from the instance should be used to reconcile the
parent/child relationship.
"""
if isinstance(prop, basestring):
mapper = object_mapper(instance)
prop = mapper.get_property(prop, resolve_synonyms=True)
elif isinstance(prop, attributes.QueryableAttribute):
prop = prop.property
return prop.compare(operators.eq,
instance,
value_is_parent=True)
def _entity_info(entity, compile=True):
"""Return mapping information given a class, mapper, or AliasedClass.
Returns 3-tuple of: mapper, mapped selectable, boolean indicating if this
is an aliased() construct.
If the given entity is not a mapper, mapped class, or aliased construct,
returns None, the entity, False. This is typically used to allow
unmapped selectables through.
"""
if isinstance(entity, AliasedClass):
return entity._AliasedClass__mapper, entity._AliasedClass__alias, True
if isinstance(entity, mapperlib.Mapper):
mapper = entity
elif isinstance(entity, type):
class_manager = attributes.manager_of_class(entity)
if class_manager is None:
return None, entity, False
mapper = class_manager.mapper
else:
return None, entity, False
if compile:
mapper = mapper.compile()
return mapper, mapper._with_polymorphic_selectable, False
def _entity_descriptor(entity, key):
"""Return a class attribute given an entity and string name.
May return :class:`.InstrumentedAttribute` or user-defined
attribute.
"""
if not isinstance(entity, (AliasedClass, type)):
entity = entity.class_
try:
return getattr(entity, key)
except AttributeError:
raise sa_exc.InvalidRequestError(
"Entity '%s' has no property '%s'" %
(entity, key)
)
def _orm_columns(entity):
mapper, selectable, is_aliased_class = _entity_info(entity)
if isinstance(selectable, expression.Selectable):
return [c for c in selectable.c]
else:
return [selectable]
def _orm_selectable(entity):
mapper, selectable, is_aliased_class = _entity_info(entity)
return selectable
def _attr_as_key(attr):
if hasattr(attr, 'key'):
return attr.key
else:
return expression._column_as_key(attr)
def _is_aliased_class(entity):
return isinstance(entity, AliasedClass)
def _state_mapper(state):
return state.manager.mapper
def object_mapper(instance):
"""Given an object, return the primary Mapper associated with the object
instance.
Raises UnmappedInstanceError if no mapping is configured.
"""
try:
state = attributes.instance_state(instance)
return state.manager.mapper
except exc.UnmappedClassError:
raise exc.UnmappedInstanceError(instance)
except exc.NO_STATE:
raise exc.UnmappedInstanceError(instance)
def class_mapper(class_, compile=True):
"""Given a class, return the primary Mapper associated with the key.
Raises UnmappedClassError if no mapping is configured.
"""
try:
class_manager = attributes.manager_of_class(class_)
mapper = class_manager.mapper
except exc.NO_STATE:
raise exc.UnmappedClassError(class_)
if compile:
mapper = mapper.compile()
return mapper
def _class_to_mapper(class_or_mapper, compile=True):
if _is_aliased_class(class_or_mapper):
return class_or_mapper._AliasedClass__mapper
elif isinstance(class_or_mapper, type):
try:
class_manager = attributes.manager_of_class(class_or_mapper)
mapper = class_manager.mapper
except exc.NO_STATE:
raise exc.UnmappedClassError(class_or_mapper)
elif isinstance(class_or_mapper, mapperlib.Mapper):
mapper = class_or_mapper
else:
raise exc.UnmappedClassError(class_or_mapper)
if compile:
return mapper.compile()
else:
return mapper
def has_identity(object):
state = attributes.instance_state(object)
return state.has_identity
def _is_mapped_class(cls):
if isinstance(cls, (AliasedClass, mapperlib.Mapper)):
return True
if isinstance(cls, expression.ClauseElement):
return False
if isinstance(cls, type):
manager = attributes.manager_of_class(cls)
return manager and _INSTRUMENTOR in manager.info
return False
def instance_str(instance):
"""Return a string describing an instance."""
return state_str(attributes.instance_state(instance))
def state_str(state):
"""Return a string describing an instance via its InstanceState."""
if state is None:
return "None"
else:
return '<%s at 0x%x>' % (state.class_.__name__, id(state.obj()))
def attribute_str(instance, attribute):
return instance_str(instance) + "." + attribute
def state_attribute_str(state, attribute):
return state_str(state) + "." + attribute
def identity_equal(a, b):
if a is b:
return True
if a is None or b is None:
return False
try:
state_a = attributes.instance_state(a)
state_b = attributes.instance_state(b)
except exc.NO_STATE:
return False
if state_a.key is None or state_b.key is None:
return False
return state_a.key == state_b.key
| {
"content_hash": "cd67e37be384bd641f14aebd0bef1bae",
"timestamp": "",
"source": "github",
"line_count": 685,
"max_line_length": 78,
"avg_line_length": 33.995620437956205,
"alnum_prop": 0.6031262077554,
"repo_name": "igemsoftware/SYSU-Software2013",
"id": "f26da6c7d1db35c03b0540f8659ee1df9d3b8b49",
"size": "23517",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "project/Python27_32/Lib/site-packages/pypm/external/2/sqlalchemy/orm/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "4234"
},
{
"name": "C",
"bytes": "2246655"
},
{
"name": "C#",
"bytes": "30903"
},
{
"name": "C++",
"bytes": "344228"
},
{
"name": "CSS",
"bytes": "437211"
},
{
"name": "F#",
"bytes": "9222"
},
{
"name": "JavaScript",
"bytes": "7288480"
},
{
"name": "Python",
"bytes": "55202181"
},
{
"name": "Shell",
"bytes": "23510"
},
{
"name": "Tcl",
"bytes": "3329368"
},
{
"name": "Visual Basic",
"bytes": "4330"
},
{
"name": "XSLT",
"bytes": "38160"
}
],
"symlink_target": ""
} |
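Among the helpers above, identity_key() documents three call signatures. A self-contained sketch exercising two of them follows; the throwaway User mapping and in-memory SQLite engine are illustrative only, and the imports assume a SQLAlchemy version where these legacy paths still resolve.

# Self-contained sketch of identity_key(); the User class and SQLite engine are
# throwaway fixtures for illustration.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.util import identity_key

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(User(id=7, name='jacob'))
session.commit()

print(identity_key(User, 7))                               # class + primary key
print(identity_key(instance=session.query(User).get(7)))   # from an instance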
from oslo_log import log as logging
from oslotest import base
LOG = logging.getLogger(__name__)
class BaseTest(base.BaseTestCase):
def setUp(self):
super(BaseTest, self).setUp()
def _assert_single_item(self, items, **props):
return self._assert_multiple_items(items, 1, **props)[0]
def _assert_multiple_items(self, items, count, **props):
def _matches(item, **props):
for prop_name, prop_val in props.items():
v = item[prop_name] if isinstance(
item, dict) else getattr(item, prop_name)
if v != prop_val:
return False
return True
filtered_items = list(
[item for item in items if _matches(item, **props)]
)
found = len(filtered_items)
if found != count:
LOG.info("[failed test ctx] items=%s, expected_props=%s", str(
items), props)
self.fail("Wrong number of items found [props=%s, "
"expected=%s, found=%s]" % (props, count, found))
return filtered_items
| {
"content_hash": "ba1f7061acc6ef022b48473f2e51baaa",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 74,
"avg_line_length": 29.31578947368421,
"alnum_prop": 0.5502692998204668,
"repo_name": "openstack/mistral-extra",
"id": "febf6c9de7c94c964307eae57c5bd4e2c9a26aa3",
"size": "1718",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mistral_extra/tests/unit/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "105353"
},
{
"name": "Shell",
"bytes": "2078"
}
],
"symlink_target": ""
} |
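The _assert_single_item/_assert_multiple_items helpers above filter a list of dicts or objects by attribute values and fail with a descriptive message when the count is wrong. An illustrative subclass showing the intended usage:

# Illustrative subclass of the BaseTest above; the item dicts are made up.
class ExampleTest(BaseTest):
    def test_filtering_helpers(self):
        items = [
            {'name': 'a', 'state': 'SUCCESS'},
            {'name': 'b', 'state': 'ERROR'},
            {'name': 'c', 'state': 'SUCCESS'},
        ]
        item = self._assert_single_item(items, name='b')
        self.assertEqual('ERROR', item['state'])
        self._assert_multiple_items(items, 2, state='SUCCESS')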
from setuptools import setup, find_packages
import sys, os
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README')).read()
NEWS = open(os.path.join(here, 'NEWS.rst')).read()
from release_info import version
install_requires = [
# 'anyjson',
# 'argparse',
# 'html',
# 'pyshp',
# 'pyproj',
'gdal',
'numpy',
'scipy',
'pyopencl',
]
setup(name='pygis',
version=version,
description="Python utilities for GIS files",
long_description=README + '\n\n' + NEWS,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Topic :: Scientific/Engineering :: GIS',
'Topic :: Utilities',
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
],
keywords='',
author='Rich Wareham',
author_email='[email protected]',
url='http://github.com/rjw57/pygis',
license='APACHE-2.0',
packages=['pygis',], #find_packages(os.path.join(here, 'src')),
package_dir = {'': 'src'},
package_data = {'pygis': ['data/*']},
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
entry_points={
# 'console_scripts':
# ['shp2kml=pygis.shp2kml:main', 'shp2json=pygis.shp2json:main']
}
)
| {
"content_hash": "9123d8904850aa896167a32f6c07df18",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 79,
"avg_line_length": 28.36,
"alnum_prop": 0.6064880112834978,
"repo_name": "rjw57/pygis",
"id": "6b435c70d4bf598008b55cbd3d4fe29c6a1017c1",
"size": "1418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6366"
},
{
"name": "Python",
"bytes": "56636"
}
],
"symlink_target": ""
} |
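The setup.py above imports ``version`` from a sibling release_info module that is not shown. A minimal sketch of what that module has to provide follows; the version string is a placeholder, not the project's real release number.

# Minimal sketch of the sibling release_info.py that setup.py imports from;
# the value is a placeholder.
version = '0.0.0'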
import importlib
import inspect
import os
import re
import sys
import tempfile
from io import StringIO
from pathlib import Path
from django.conf.urls import url
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import DatabaseError, connection
from django.shortcuts import render
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin, patch_logger
from django.urls import reverse
from django.utils.encoding import force_bytes
from django.utils.functional import SimpleLazyObject
from django.utils.safestring import mark_safe
from django.views.debug import (
CLEANSED_SUBSTITUTE, CallableSettingWrapper, ExceptionReporter,
cleanse_setting, technical_500_response,
)
from ..views import (
custom_exception_reporter_filter_view, index_page,
multivalue_dict_key_error, non_sensitive_view, paranoid_view,
sensitive_args_function_caller, sensitive_kwargs_function_caller,
sensitive_method_view, sensitive_view,
)
PY36 = sys.version_info >= (3, 6)
class User:
def __str__(self):
return 'jacob'
class WithoutEmptyPathUrls:
urlpatterns = [url(r'url/$', index_page, name='url')]
class CallableSettingWrapperTests(SimpleTestCase):
""" Unittests for CallableSettingWrapper
"""
def test_repr(self):
class WrappedCallable:
def __repr__(self):
return "repr from the wrapped callable"
def __call__(self):
pass
actual = repr(CallableSettingWrapper(WrappedCallable()))
self.assertEqual(actual, "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
class DebugViewTests(LoggingCaptureMixin, SimpleTestCase):
def test_files(self):
response = self.client.get('/raises/')
self.assertEqual(response.status_code, 500)
data = {
'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
}
response = self.client.post('/raises/', data)
self.assertContains(response, 'file_data.txt', status_code=500)
self.assertNotContains(response, 'haha', status_code=500)
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
# Ensure no 403.html template exists to test the default case.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
}])
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
# Set up a test 403.html template.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'loaders': [
('django.template.loaders.locmem.Loader', {
'403.html': 'This is a test template for a 403 error ({{ exception }}).',
}),
],
},
}])
def test_403_template(self):
response = self.client.get('/raises403/')
self.assertContains(response, 'test template', status_code=403)
self.assertContains(response, '(Insufficient Permissions).', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
def test_raised_404(self):
response = self.client.get('/views/raises404/')
self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
def test_404_not_in_urls(self):
response = self.client.get('/not-in-urls')
self.assertNotContains(response, "Raised by:", status_code=404)
self.assertContains(response, "Django tried these URL patterns", status_code=404)
self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)
# Pattern and view name of a RegexURLPattern appear.
self.assertContains(response, r"^regex-post/(?P<pk>[0-9]+)/$", status_code=404)
self.assertContains(response, "[name='regex-post']", status_code=404)
# Pattern and view name of a RoutePattern appear.
self.assertContains(response, r"path-post/<int:pk>/", status_code=404)
self.assertContains(response, "[name='path-post']", status_code=404)
@override_settings(ROOT_URLCONF=WithoutEmptyPathUrls)
def test_404_empty_path_not_in_urls(self):
response = self.client.get('/')
self.assertContains(response, "The empty path didn't match any of these.", status_code=404)
def test_technical_404(self):
response = self.client.get('/views/technical404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.technical404", status_code=404)
def test_classbased_technical_404(self):
response = self.client.get('/views/classbased404/')
self.assertContains(response, "Raised by:", status_code=404)
self.assertContains(response, "view_tests.views.Http404View", status_code=404)
def test_non_l10ned_numeric_ids(self):
"""
        Numeric IDs and line numbers in fancy traceback context blocks shouldn't be localized.
"""
with self.settings(DEBUG=True, USE_L10N=True):
response = self.client.get('/raises500/')
# We look for a HTML fragment of the form
# '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
self.assertContains(response, '<div class="context" id="', status_code=500)
match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
self.assertIsNotNone(match)
id_repr = match.group('id')
self.assertFalse(
re.search(b'[^c0-9]', id_repr),
"Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr.decode()
)
def test_template_exceptions(self):
try:
self.client.get(reverse('template_exception'))
except Exception:
raising_loc = inspect.trace()[-1][-2][0].strip()
self.assertNotEqual(
raising_loc.find("raise Exception('boom')"), -1,
"Failed to find 'raise Exception' in last frame of "
"traceback, instead found: %s" % raising_loc
)
def test_template_loader_postmortem(self):
"""Tests for not existing file"""
template_name = "notfound.html"
with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
tempdir = os.path.dirname(tmpfile.name)
template_path = os.path.join(tempdir, template_name)
with override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [tempdir],
}]):
response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2)
# Assert as HTML.
self.assertContains(
response,
'<li><code>django.template.loaders.filesystem.Loader</code>: '
'%s (Source does not exist)</li>' % os.path.join(tempdir, 'notfound.html'),
status_code=500,
html=True,
)
def test_no_template_source_loaders(self):
"""
Make sure if you don't specify a template, the debug view doesn't blow up.
"""
with self.assertRaises(TemplateDoesNotExist):
self.client.get('/render_no_template/')
@override_settings(ROOT_URLCONF='view_tests.default_urls')
def test_default_urlconf_template(self):
"""
        Make sure that the default URLconf template is shown instead
of the technical 404 page, if the user has not altered their
URLconf yet.
"""
response = self.client.get('/')
self.assertContains(
response,
"<h2>The install worked successfully! Congratulations!</h2>"
)
@override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
def test_regression_21530(self):
"""
Regression test for bug #21530.
If the admin app include is replaced with exactly one url
pattern, then the technical 404 template should be displayed.
The bug here was that an AttributeError caused a 500 response.
"""
response = self.client.get('/')
self.assertContains(
response,
"Page not found <span>(404)</span>",
status_code=404
)
class DebugViewQueriesAllowedTests(SimpleTestCase):
# May need a query to initialize MySQL connection
allow_database_queries = True
def test_handle_db_exception(self):
"""
Ensure the debug view works when a database exception is raised by
performing an invalid query and passing the exception to the debug view.
"""
with connection.cursor() as cursor:
try:
cursor.execute('INVALID SQL')
except DatabaseError:
exc_info = sys.exc_info()
rf = RequestFactory()
response = technical_500_response(rf.get('/'), *exc_info)
self.assertContains(response, 'OperationalError at /', status_code=500)
@override_settings(
DEBUG=True,
ROOT_URLCONF='view_tests.urls',
# No template directories are configured, so no templates will be found.
TEMPLATES=[{
'BACKEND': 'django.template.backends.dummy.TemplateStrings',
}],
)
class NonDjangoTemplatesDebugViewTests(SimpleTestCase):
def test_400(self):
# When DEBUG=True, technical_500_template() is called.
with patch_logger('django.security.SuspiciousOperation', 'error'):
response = self.client.get('/raises400/')
self.assertContains(response, '<div class="context" id="', status_code=400)
def test_403(self):
response = self.client.get('/raises403/')
self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)
def test_404(self):
response = self.client.get('/raises404/')
self.assertEqual(response.status_code, 404)
def test_template_not_found_error(self):
# Raises a TemplateDoesNotExist exception and shows the debug view.
url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"})
response = self.client.get(url)
self.assertContains(response, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<p>jacob</p>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
self.assertIn('<p>No POST data</p>', html)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError</h1>', html)
self.assertIn('<pre class="exception_value">Can't find my keys</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<th>Exception Type:</th>', html)
self.assertIn('<th>Exception Value:</th>', html)
self.assertIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_eol_support(self):
"""The ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
LINES = ['print %d' % i for i in range(1, 6)]
reporter = ExceptionReporter(None, None, None, None)
for newline in ['\n', '\r\n', '\r']:
fd, filename = tempfile.mkstemp(text=False)
os.write(fd, force_bytes(newline.join(LINES) + newline))
os.close(fd)
try:
self.assertEqual(
reporter._get_lines_from_file(filename, 3, 2),
(1, LINES[1:3], LINES[3], LINES[4:])
)
finally:
os.unlink(filename)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_reporting_of_nested_exceptions(self):
request = self.rf.get('/test_view/')
try:
try:
raise AttributeError(mark_safe('<p>Top level</p>'))
except AttributeError as explicit:
try:
raise ValueError(mark_safe('<p>Second exception</p>')) from explicit
except ValueError:
raise IndexError(mark_safe('<p>Final exception</p>'))
except Exception:
# Custom exception handler, just pass it into ExceptionReporter
exc_type, exc_value, tb = sys.exc_info()
explicit_exc = 'The above exception ({0}) was the direct cause of the following exception:'
implicit_exc = 'During handling of the above exception ({0}), another exception occurred:'
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
# Both messages are twice on page -- one rendered as html,
# one as plain text (for pastebin)
        self.assertEqual(2, html.count(explicit_exc.format('&lt;p&gt;Top level&lt;/p&gt;')))
        self.assertEqual(2, html.count(implicit_exc.format('&lt;p&gt;Second exception&lt;/p&gt;')))
        self.assertEqual(10, html.count('&lt;p&gt;Final exception&lt;/p&gt;'))
text = reporter.get_traceback_text()
self.assertIn(explicit_exc.format('<p>Top level</p>'), text)
self.assertIn(implicit_exc.format('<p>Second exception</p>'), text)
self.assertEqual(3, text.count('<p>Final exception</p>'))
def test_reporting_frames_without_source(self):
try:
source = "def funcName():\n raise Error('Whoops')\nfuncName()"
namespace = {}
code = compile(source, 'generated', 'exec')
exec(code, namespace)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
frames = reporter.get_traceback_frames()
last_frame = frames[-1]
self.assertEqual(last_frame['context_line'], '<source code not available>')
self.assertEqual(last_frame['filename'], 'generated')
self.assertEqual(last_frame['function'], 'funcName')
self.assertEqual(last_frame['lineno'], 2)
html = reporter.get_traceback_html()
self.assertIn('generated in funcName', html)
text = reporter.get_traceback_text()
self.assertIn('"generated" in funcName', text)
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertIn('<th>Request Method:</th>', html)
self.assertIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertNotIn('<p>Request data not supplied</p>', html)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>Report</h1>', html)
self.assertIn('<pre class="exception_value">I'm a little teapot</pre>', html)
self.assertNotIn('<th>Request Method:</th>', html)
self.assertNotIn('<th>Request URL:</th>', html)
self.assertNotIn('<th>Exception Type:</th>', html)
self.assertNotIn('<th>Exception Value:</th>', html)
self.assertNotIn('<h2>Traceback ', html)
self.assertIn('<h2>Request information</h2>', html)
self.assertIn('<p>Request data not supplied</p>', html)
def test_non_utf8_values_handling(self):
"Non-UTF-8 exceptions/values should not make the output generation choke."
try:
class NonUtf8Output(Exception):
def __repr__(self):
return b'EXC\xe9EXC'
somevar = b'VAL\xe9VAL' # NOQA
raise NonUtf8Output()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('VAL\\xe9VAL', html)
self.assertIn('EXC\\xe9EXC', html)
def test_local_variable_escaping(self):
"""Safe strings in local variables are escaped."""
try:
local = mark_safe('<p>Local variable</p>')
raise ValueError(local)
except Exception:
exc_type, exc_value, tb = sys.exc_info()
html = ExceptionReporter(None, exc_type, exc_value, tb).get_traceback_html()
self.assertIn('<td class="code"><pre>'<p>Local variable</p>'</pre></td>', html)
def test_unprintable_values_handling(self):
"Unprintable values should not make the output generation choke."
try:
class OomOutput:
def __repr__(self):
raise MemoryError('OOM')
oomvalue = OomOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<td class="code"><pre>Error in formatting', html)
def test_too_large_values_handling(self):
"Large values should not create a large HTML."
large = 256 * 1024
repr_of_str_adds = len(repr(''))
try:
class LargeOutput:
def __repr__(self):
return repr('A' * large)
largevalue = LargeOutput() # NOQA
raise ValueError()
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertEqual(len(html) // 1024 // 128, 0) # still fit in 128Kb
self.assertIn('<trimmed %d bytes string>' % (large + repr_of_str_adds,), html)
def test_encoding_error(self):
"""
A UnicodeError displays a portion of the problematic string. HTML in
safe strings is escaped.
"""
try:
mark_safe('abcdefghijkl<p>mnὀp</p>qrstuwxyz').encode('ascii')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertIn('<h2>Unicode error hint</h2>', html)
self.assertIn('The string that could not be encoded/decoded was: ', html)
            self.assertIn('<strong>&lt;p&gt;mnὀp&lt;/p&gt;</strong>', html)
def test_unfrozen_importlib(self):
"""
importlib is not a frozen app, but its loader thinks it's frozen which
results in an ImportError. Refs #21443.
"""
try:
request = self.rf.get('/test_view/')
importlib.import_module('abc.def.invalid.name')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>%sError at /test_view/</h1>' % ('ModuleNotFound' if PY36 else 'Import'), html)
def test_ignore_traceback_evaluation_exceptions(self):
"""
Don't trip over exceptions generated by crafted objects when
evaluating them while cleansing (#24455).
"""
class BrokenEvaluation(Exception):
pass
def broken_setup():
raise BrokenEvaluation
request = self.rf.get('/test_view/')
broken_lazy = SimpleLazyObject(broken_setup)
try:
bool(broken_lazy)
except BrokenEvaluation:
exc_type, exc_value, tb = sys.exc_info()
self.assertIn(
"BrokenEvaluation",
ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(),
"Evaluation exception reason not mentioned in traceback"
)
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertIn("http://evil.com/", html)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
        value = '<td>items</td><td class="code"><pre>&#39;Oops&#39;</pre></td>'
# GET
request = self.rf.get('/test_view/?items=Oops')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# POST
request = self.rf.post('/test_view/', data={'items': 'Oops'})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(value, html)
# FILES
fp = StringIO('filecontent')
request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
self.assertInHTML(
'<td>items</td><td class="code"><pre><InMemoryUploadedFile: '
'items (application/octet-stream)></pre></td>',
html
)
        # COOKIES
rf = RequestFactory()
rf.cookies['items'] = 'Oops'
request = rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
html = reporter.get_traceback_html()
        self.assertInHTML('<td>items</td><td class="code"><pre>&#39;Oops&#39;</pre></td>', html)
def test_exception_fetching_user(self):
"""
The error page can be rendered if the current user can't be retrieved
(such as when the database is unavailable).
"""
class ExceptionUser:
def __str__(self):
raise Exception()
request = self.rf.get('/test_view/')
request.user = ExceptionUser()
try:
raise ValueError('Oops')
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
html = reporter.get_traceback_html()
self.assertInHTML('<h1>ValueError at /test_view/</h1>', html)
self.assertIn('<pre class="exception_value">Oops</pre>', html)
self.assertIn('<h3 id="user-info">USER</h3>', html)
self.assertIn('<p>[unable to retrieve the current user]</p>', html)
text = reporter.get_traceback_text()
self.assertIn('USER: [unable to retrieve the current user]', text)
class PlainTextReportTests(SimpleTestCase):
rf = RequestFactory()
def test_request_and_exception(self):
"A simple exception report can be generated"
try:
request = self.rf.get('/test_view/')
request.user = User()
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError at /test_view/', text)
self.assertIn("Can't find my keys", text)
self.assertIn('Request Method:', text)
self.assertIn('Request URL:', text)
self.assertIn('USER: jacob', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback:', text)
self.assertIn('Request information:', text)
self.assertNotIn('Request data not supplied', text)
def test_no_request(self):
"An exception report can be generated without request"
try:
raise ValueError("Can't find my keys")
except ValueError:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(None, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
self.assertIn('ValueError', text)
self.assertIn("Can't find my keys", text)
self.assertNotIn('Request Method:', text)
self.assertNotIn('Request URL:', text)
self.assertNotIn('USER:', text)
self.assertIn('Exception Type:', text)
self.assertIn('Exception Value:', text)
self.assertIn('Traceback:', text)
self.assertIn('Request data not supplied', text)
def test_no_exception(self):
"An exception report can be generated for just a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
reporter.get_traceback_text()
def test_request_and_message(self):
"A message can be provided in addition to a request"
request = self.rf.get('/test_view/')
reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(DEBUG=True)
def test_template_exception(self):
request = self.rf.get('/test_view/')
try:
render(request, 'debug/template_error.html')
except Exception:
exc_type, exc_value, tb = sys.exc_info()
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
text = reporter.get_traceback_text()
templ_path = Path(Path(__file__).parent.parent, 'templates', 'debug', 'template_error.html')
self.assertIn(
'Template error:\n'
'In template %(path)s, error at line 2\n'
' \'cycle\' tag requires at least two arguments\n'
' 1 : Template with error:\n'
' 2 : {%% cycle %%} \n'
' 3 : ' % {'path': templ_path},
text
)
def test_request_with_items_key(self):
"""
An exception report can be generated for requests with 'items' in
request GET, POST, FILES, or COOKIES QueryDicts.
"""
# GET
request = self.rf.get('/test_view/?items=Oops')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# POST
request = self.rf.post('/test_view/', data={'items': 'Oops'})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
# FILES
fp = StringIO('filecontent')
request = self.rf.post('/test_view/', data={'name': 'filename', 'items': fp})
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn('items = <InMemoryUploadedFile:', text)
        # COOKIES
rf = RequestFactory()
rf.cookies['items'] = 'Oops'
request = rf.get('/test_view/')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("items = 'Oops'", text)
def test_message_only(self):
reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
reporter.get_traceback_text()
@override_settings(ALLOWED_HOSTS='example.com')
def test_disallowed_host(self):
"An exception report can be generated even for a disallowed host."
request = self.rf.get('/', HTTP_HOST='evil.com')
reporter = ExceptionReporter(request, None, None, None)
text = reporter.get_traceback_text()
self.assertIn("http://evil.com/", text)
class ExceptionReportTestMixin:
# Mixin used in the ExceptionReporterFilterTests and
# AjaxResponseExceptionReporterFilter tests below
breakfast_data = {'sausage-key': 'sausage-value',
'baked-beans-key': 'baked-beans-value',
'hash-brown-key': 'hash-brown-value',
'bacon-key': 'bacon-value'}
def verify_unsafe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# All variables are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertContains(response, k, status_code=500)
self.assertContains(response, v, status_code=500)
def verify_safe_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Non-sensitive variable's name and value are shown.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertContains(response, 'scrambled', status_code=500)
# Sensitive variable's name is shown but not its value.
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# Non-sensitive POST parameters' values are shown.
self.assertContains(response, 'baked-beans-value', status_code=500)
self.assertContains(response, 'hash-brown-value', status_code=500)
# Sensitive POST parameters' values are not shown.
self.assertNotContains(response, 'sausage-value', status_code=500)
self.assertNotContains(response, 'bacon-value', status_code=500)
def verify_paranoid_response(self, view, check_for_vars=True,
check_for_POST_params=True):
"""
Asserts that no variables or POST parameters are displayed in the response.
"""
request = self.rf.post('/some_url/', self.breakfast_data)
response = view(request)
if check_for_vars:
# Show variable names but not their values.
self.assertContains(response, 'cooked_eggs', status_code=500)
self.assertNotContains(response, 'scrambled', status_code=500)
self.assertContains(response, 'sauce', status_code=500)
self.assertNotContains(response, 'worcestershire', status_code=500)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertContains(response, k, status_code=500)
# No POST parameters' values are shown.
self.assertNotContains(response, v, status_code=500)
def verify_unsafe_email(self, view, check_for_POST_params=True):
"""
        Asserts that potentially sensitive info is displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', '[email protected]')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
# Frames vars are shown in html email reports.
body_html = str(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertIn('worcestershire', body_html)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters are shown.
self.assertIn(k, body_plain)
self.assertIn(v, body_plain)
self.assertIn(k, body_html)
self.assertIn(v, body_html)
def verify_safe_email(self, view, check_for_POST_params=True):
"""
        Asserts that certain sensitive info is not displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', '[email protected]')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body_plain = str(email.body)
self.assertNotIn('cooked_eggs', body_plain)
self.assertNotIn('scrambled', body_plain)
self.assertNotIn('sauce', body_plain)
self.assertNotIn('worcestershire', body_plain)
# Frames vars are shown in html email reports.
body_html = str(email.alternatives[0][0])
self.assertIn('cooked_eggs', body_html)
self.assertIn('scrambled', body_html)
self.assertIn('sauce', body_html)
self.assertNotIn('worcestershire', body_html)
if check_for_POST_params:
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body_plain)
# Non-sensitive POST parameters' values are shown.
self.assertIn('baked-beans-value', body_plain)
self.assertIn('hash-brown-value', body_plain)
self.assertIn('baked-beans-value', body_html)
self.assertIn('hash-brown-value', body_html)
# Sensitive POST parameters' values are not shown.
self.assertNotIn('sausage-value', body_plain)
self.assertNotIn('bacon-value', body_plain)
self.assertNotIn('sausage-value', body_html)
self.assertNotIn('bacon-value', body_html)
def verify_paranoid_email(self, view):
"""
Asserts that no variables or POST parameters are displayed in the email report.
"""
with self.settings(ADMINS=[('Admin', '[email protected]')]):
mail.outbox = [] # Empty outbox
request = self.rf.post('/some_url/', self.breakfast_data)
view(request)
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
# Frames vars are never shown in plain text email reports.
body = str(email.body)
self.assertNotIn('cooked_eggs', body)
self.assertNotIn('scrambled', body)
self.assertNotIn('sauce', body)
self.assertNotIn('worcestershire', body)
for k, v in self.breakfast_data.items():
# All POST parameters' names are shown.
self.assertIn(k, body)
# No POST parameters' values are shown.
self.assertNotIn(v, body)
@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Sensitive information can be filtered out of error reports (#14614).
"""
rf = RequestFactory()
def test_non_sensitive_request(self):
"""
        Everything (request info and frame variables) can be seen
in the default error reports for non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view)
self.verify_unsafe_email(non_sensitive_view)
def test_sensitive_request(self):
"""
Sensitive POST parameters and frame variables cannot be
seen in the default error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view)
self.verify_unsafe_email(sensitive_view)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view)
self.verify_safe_email(sensitive_view)
def test_paranoid_request(self):
"""
No POST parameters and frame variables can be seen in the
default error reports for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view)
self.verify_unsafe_email(paranoid_view)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view)
self.verify_paranoid_email(paranoid_view)
def test_multivalue_dict_key_error(self):
"""
#21098 -- Sensitive POST parameters cannot be seen in the
        error reports if request.POST['nonexistent_key'] throws an error.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(multivalue_dict_key_error)
self.verify_unsafe_email(multivalue_dict_key_error)
with self.settings(DEBUG=False):
self.verify_safe_response(multivalue_dict_key_error)
self.verify_safe_email(multivalue_dict_key_error)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view)
self.verify_unsafe_email(custom_exception_reporter_filter_view)
def test_sensitive_method(self):
"""
The sensitive_variables decorator works with object methods.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_method_view, check_for_POST_params=False)
self.verify_unsafe_email(sensitive_method_view, check_for_POST_params=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_method_view, check_for_POST_params=False)
self.verify_safe_email(sensitive_method_view, check_for_POST_params=False)
def test_sensitive_function_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as arguments to the decorated
function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_args_function_caller)
self.verify_unsafe_email(sensitive_args_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False)
def test_sensitive_function_keyword_arguments(self):
"""
Sensitive variables don't leak in the sensitive_variables decorator's
frame, when those variables are passed as keyword arguments to the
decorated function.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_kwargs_function_caller)
self.verify_unsafe_email(sensitive_kwargs_function_caller)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False)
self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False)
def test_callable_settings(self):
"""
Callable settings should not be evaluated in the debug page (#21345).
"""
def callable_setting():
return "This should not be displayed"
with self.settings(DEBUG=True, FOOBAR=callable_setting):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_callable_settings_forbidding_to_set_attributes(self):
"""
        Callable settings that forbid setting attributes should not break
the debug page (#23070).
"""
class CallableSettingWithSlots:
__slots__ = []
def __call__(self):
return "This should not be displayed"
with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
response = self.client.get('/raises500/')
self.assertNotContains(response, "This should not be displayed", status_code=500)
def test_dict_setting_with_non_str_key(self):
"""
A dict setting containing a non-string key should not break the
debug page (#12744).
"""
with self.settings(DEBUG=True, FOOBAR={42: None}):
response = self.client.get('/raises500/')
self.assertContains(response, 'FOOBAR', status_code=500)
def test_sensitive_settings(self):
"""
The debug page should not show some sensitive settings
(password, secret key, ...).
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
def test_settings_with_sensitive_keys(self):
"""
The debug page should filter out some sensitive information found in
dict settings.
"""
sensitive_settings = [
'SECRET_KEY',
'PASSWORD',
'API_KEY',
'AUTH_TOKEN',
]
for setting in sensitive_settings:
FOOBAR = {
setting: "should not be displayed",
'recursive': {setting: "should not be displayed"},
}
with self.settings(DEBUG=True, FOOBAR=FOOBAR):
response = self.client.get('/raises500/')
self.assertNotContains(response, 'should not be displayed', status_code=500)
class AjaxResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
"""
Sensitive information can be filtered out of error reports.
Here we specifically test the plain text 500 debug-only error page served
when it has been detected the request was sent by JS code. We don't check
for (non)existence of frames vars in the traceback information section of
the response content because we don't include them in these error pages.
Refs #14614.
"""
rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest')
def test_non_sensitive_request(self):
"""
        Request info can be seen in the default error reports for
non-sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)
def test_sensitive_request(self):
"""
Sensitive POST parameters cannot be seen in the default
error reports for sensitive requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(sensitive_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_safe_response(sensitive_view, check_for_vars=False)
def test_paranoid_request(self):
"""
No POST parameters can be seen in the default error reports
for "paranoid" requests.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(paranoid_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_paranoid_response(paranoid_view, check_for_vars=False)
def test_custom_exception_reporter_filter(self):
"""
It's possible to assign an exception reporter filter to
the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
"""
with self.settings(DEBUG=True):
self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
with self.settings(DEBUG=False):
self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
def test_ajax_response_encoding(self):
response = self.client.get('/raises500/', HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(response['Content-Type'], 'text/plain; charset=utf-8')
class HelperFunctionTests(SimpleTestCase):
def test_cleanse_setting_basic(self):
self.assertEqual(cleanse_setting('TEST', 'TEST'), 'TEST')
self.assertEqual(cleanse_setting('PASSWORD', 'super_secret'), CLEANSED_SUBSTITUTE)
def test_cleanse_setting_ignore_case(self):
self.assertEqual(cleanse_setting('password', 'super_secret'), CLEANSED_SUBSTITUTE)
def test_cleanse_setting_recurses_in_dictionary(self):
initial = {'login': 'cooper', 'password': 'secret'}
expected = {'login': 'cooper', 'password': CLEANSED_SUBSTITUTE}
self.assertEqual(cleanse_setting('SETTING_NAME', initial), expected)
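# Illustrative sketch (not part of Django's test suite): cleanse_setting()
# replaces values whose keys look sensitive and leaves everything else alone,
# e.g.
#
#   cleanse_setting('API_KEY', 'abc123')  # -> CLEANSED_SUBSTITUTE
#   cleanse_setting('DEBUG', True)        # -> True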
| {
"content_hash": "6a494f8dd18a181d0c7b0346d823e220",
"timestamp": "",
"source": "github",
"line_count": 1156,
"max_line_length": 117,
"avg_line_length": 43.28373702422145,
"alnum_prop": 0.6158565832600528,
"repo_name": "edmorley/django",
"id": "b677fd98c9b0fb327a3160d69c52866da2754dea",
"size": "50040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/view_tests/tests/test_debug.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55935"
},
{
"name": "HTML",
"bytes": "202902"
},
{
"name": "JavaScript",
"bytes": "252653"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11837174"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
from .api_object import APIObject
class Error(APIObject): # pylint: disable=abstract-method
pass
| {
"content_hash": "d69ce83e5285c847ef8a35521143ee5b",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 58,
"avg_line_length": 25.75,
"alnum_prop": 0.7572815533980582,
"repo_name": "slash-testing/backslash-python",
"id": "201086d7e33547012ec768ea989601c492336ec3",
"size": "103",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "backslash/error.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "349"
},
{
"name": "Python",
"bytes": "38143"
}
],
"symlink_target": ""
} |
import sys
import logging
from os import path, getenv, makedirs
APP_DATA = getenv("APPDATA")
LOG_FILE = path.join(APP_DATA, "Subtitles Distributor/subtitles_distributor.log")
makedirs(path.dirname(LOG_FILE), exist_ok=True)
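# Touch the log file so it already exists before the logging configuration
# below is applied (the FileHandler would also create it once dictConfig runs).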
with open(LOG_FILE, 'a'): pass
def logging_level():
if getattr(sys, 'frozen', False):
return logging.INFO
return logging.DEBUG
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standardformat': {
'format': '[{asctime}] [{levelname:8}] [{name}] [{threadName}] [{funcName}():{lineno}]: [{message}]',
'style': '{'
},
},
'handlers': {
'file': {
'class': 'logging.FileHandler',
'filename': LOG_FILE,
'mode': 'w',
'formatter': 'standardformat',
},
'stream': {
'class': 'logging.StreamHandler',
'formatter': 'standardformat',
},
},
'loggers': {
'gui.mainWindow': {
'handlers': ['file', 'stream'],
'level': logging_level(),
'propagate': True
},
'main.subtitlesdistributor': {
'handlers': ['file', 'stream'],
'level': logging_level(),
},
'main.fileextractors.fileextractor': {
'handlers': ['file', 'stream'],
'level': logging_level(),
},
'main.fileeventhandlers.fileeventhandler': {
'handlers': ['file', 'stream'],
'level': logging_level(),
},
'main.utilities.subtitlesadjuster': {
'handlers': ['file', 'stream'],
'level': logging_level(),
},
'config.config': {
'handlers': ['file', 'stream'],
'level': logging_level(),
},
}
}
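# Minimal usage sketch (not part of the original module; the import path is
# assumed from the repository layout):
#
#   import logging.config
#   from log.logconf import LOGGING
#
#   logging.config.dictConfig(LOGGING)
#   logging.getLogger('main.subtitlesdistributor').info("logging configured")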
| {
"content_hash": "309b9d3d3108e46b95158edddaca0b26",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 113,
"avg_line_length": 27.984615384615385,
"alnum_prop": 0.49752611324903795,
"repo_name": "michael-stanin/Subtitles-Distributor",
"id": "26a198f89c12ee5971e42b99e45f3878167a0d54",
"size": "1819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/log/logconf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "107370"
}
],
"symlink_target": ""
} |
"""Ops for building neural network layers, regularizers, summaries, etc.
See the @{$python/contrib.layers} guide.
@@avg_pool2d
@@batch_norm
@@convolution2d
@@conv2d_in_plane
@@convolution2d_in_plane
@@conv2d_transpose
@@convolution2d_transpose
@@dropout
@@embedding_lookup_unique
@@flatten
@@fully_connected
@@layer_norm
@@linear
@@max_pool2d
@@one_hot_encoding
@@relu
@@relu6
@@repeat
@@safe_embedding_lookup_sparse
@@separable_conv2d
@@separable_convolution2d
@@softmax
@@stack
@@unit_norm
@@embed_sequence
@@apply_regularization
@@l1_regularizer
@@l2_regularizer
@@sum_regularizer
@@xavier_initializer
@@xavier_initializer_conv2d
@@variance_scaling_initializer
@@optimize_loss
@@summarize_activation
@@summarize_tensor
@@summarize_tensors
@@summarize_collection
@@summarize_activations
@@bucketized_column
@@check_feature_columns
@@create_feature_spec_for_parsing
@@crossed_column
@@embedding_column
@@scattered_embedding_column
@@input_from_feature_columns
@@joint_weighted_sum_from_feature_columns
@@make_place_holder_tensors_for_base_features
@@multi_class_target
@@one_hot_column
@@parse_feature_columns_from_examples
@@parse_feature_columns_from_sequence_examples
@@real_valued_column
@@shared_embedding_columns
@@sparse_column_with_hash_bucket
@@sparse_column_with_integerized_feature
@@sparse_column_with_keys
@@weighted_sparse_column
@@weighted_sum_from_feature_columns
@@infer_real_valued_columns
@@sequence_input_from_feature_columns
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.contrib.layers.python.layers import *
# pylint: enable=unused-import,wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['bias_add',
'conv2d',
'feature_column',
'legacy_fully_connected',
'legacy_linear',
'legacy_relu',
'OPTIMIZER_CLS_NAMES',
'regression_target',
'SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY',
'summaries']
remove_undocumented(__name__, _allowed_symbols)
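# Usage sketch (not part of the original module): the symbols listed above are
# exposed under tf.contrib.layers in TF 1.x; `inputs` below is assumed to be a
# float feature tensor.
#
#   import tensorflow as tf
#   net = tf.contrib.layers.fully_connected(inputs, num_outputs=128)
#   net = tf.contrib.layers.batch_norm(net, is_training=True)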
| {
"content_hash": "0a94740d6d5cb8d13b318beaf3d96ec7",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 72,
"avg_line_length": 23.72340425531915,
"alnum_prop": 0.7125560538116592,
"repo_name": "tntnatbry/tensorflow",
"id": "e746107e361d0502f727bbf28d0c6697b754cfcb",
"size": "2919",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/layers/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7583"
},
{
"name": "C",
"bytes": "174703"
},
{
"name": "C++",
"bytes": "21511895"
},
{
"name": "CMake",
"bytes": "122876"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "786880"
},
{
"name": "HTML",
"bytes": "557007"
},
{
"name": "Java",
"bytes": "277432"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833840"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "36990"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64714"
},
{
"name": "Protocol Buffer",
"bytes": "197812"
},
{
"name": "Python",
"bytes": "17846923"
},
{
"name": "Shell",
"bytes": "319915"
},
{
"name": "TypeScript",
"bytes": "775401"
}
],
"symlink_target": ""
} |
import os
import sys
from datetime import datetime
from optparse import (OptionParser,BadOptionError,AmbiguousOptionError)
base_path = os.popen("pwd").read()
# stolen from http://stackoverflow.com/questions/1885161/how-can-i-get-optparses-optionparser-to-ignore-invalid-options
class PassThroughOptionParser(OptionParser):
"""
An unknown option pass-through implementation of OptionParser.
When unknown arguments are encountered, bundle with largs and try again,
until rargs is depleted.
sys.exit(status) will still be called if a known argument is passed
incorrectly (e.g. missing arguments or bad argument types, etc.)
"""
def _process_args(self, largs, rargs, values):
while rargs:
try:
OptionParser._process_args(self,largs,rargs,values)
            except (AttributeError, BadOptionError):
pass
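# Behaviour sketch (not part of the original script): unlike a stock
# OptionParser, this parser does not abort with "no such option" when it meets
# flags it does not recognise, e.g.
#
#   parser.parse_args(["--base_path", "/tmp", "--unknown_flag", "value"])
#
# still yields the known options; unrecognised flags are forwarded to
# dwarf_mine by the sys.argv loop further down.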
parser = PassThroughOptionParser()
parser.add_option("--base_path", help="Path to the build directory", dest="base_path", default=base_path)
parser.add_option("--build", help="Build directory", dest="build", default="build")
parser.add_option("--app_file", help="Name of the appfile", dest="app_file", default="benchmark_file")
(options, args) = parser.parse_args()
appfile = open(options.app_file, "w+")
passed_arguments = []
skip = False
for i in range(len(sys.argv) -1):
if sys.argv[i + 1] == "--base_path" \
or sys.argv[i + 1] == "--build" \
or sys.argv[i + 1] == "--app_file":
skip = True
continue
if not skip:
passed_arguments.append(sys.argv[i + 1])
skip = False
executable = options.base_path.rstrip("\n")+"/"+options.build.rstrip("\n")+"/src/main/dwarf_mine"
nodes = [
("bigdwarf", "smp"),
("bigdwarf", "cuda"),
("quadcore1", "smp"),
("quadcore1", "cuda"),
("quadcore2", "smp"),
# ("quadcore2", "cuda"),
# ("quadcore3", "smp"),
# ("quadcore3", "cuda")
]
for configuration in nodes:
appfile.write("-host "+configuration[0]+" -np 1 "+executable+" -m "+configuration[1]+" "+" ".join(passed_arguments)+" \n")
appfile.close()
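# Each generated line has the shape (sketch, with placeholders for the values
# computed above):
#
#   -host <node> -np 1 <base_path>/<build>/src/main/dwarf_mine -m <mode> <forwarded args>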
| {
"content_hash": "257a984d0801c3ed58efd01a33559ef9",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 123,
"avg_line_length": 31.50769230769231,
"alnum_prop": 0.66943359375,
"repo_name": "mp13on11/dwarf_mine",
"id": "b72d38e6de3350e64b7051ea34a356368cdb3257",
"size": "2066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utilities/app_generator.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "536923"
},
{
"name": "C++",
"bytes": "2978458"
},
{
"name": "Python",
"bytes": "209314"
},
{
"name": "Shell",
"bytes": "297769"
}
],
"symlink_target": ""
} |
from lib.plugins import PluginBase
from lib.irma.common.utils import IrmaProbeType
class VirusTotalFormatterPlugin(PluginBase):
# =================
# plugin metadata
# =================
_plugin_name_ = "VirusTotal"
_plugin_author_ = "IRMA (c) Quarkslab"
_plugin_version_ = "1.0.0"
_plugin_category_ = IrmaProbeType.external
_plugin_description_ = "VirusTotal results Formatter"
_plugin_dependencies_ = []
# ===========
# Formatter
# ===========
@staticmethod
def can_handle_results(raw_result):
expected_name = VirusTotalFormatterPlugin.plugin_name
expected_category = VirusTotalFormatterPlugin.plugin_category
return raw_result.get('type', None) == expected_category and \
raw_result.get('name', None) == expected_name
"""
VT AVs list
'Bkav', 'MicroWorld-eScan', 'nProtect', 'K7AntiVirus', 'NANO-Antivirus',
'F-Prot', 'Norman', 'Kaspersky', 'ByteHero', 'F-Secure', 'TrendMicro',
'McAfee-GW-Edition', 'Sophos', 'Jiangmin', 'ViRobot', 'Commtouch',
'AhnLab-V3', 'VBA32', 'Rising', 'Ikarus', 'Fortinet', 'Panda',
'CAT-QuickHeal', 'McAfee', 'Malwarebytes', 'K7GW', 'TheHacker',
'TotalDefense', 'TrendMicro-HouseCall', 'Avast', 'ClamAV', 'BitDefender',
'Agnitum', 'Comodo', 'DrWeb', 'VIPRE', 'AntiVir', 'Emsisoft', 'Antiy-AVL',
'Kingsoft', 'Microsoft', 'SUPERAntiSpyware', 'GData', 'ESET-NOD32',
'AVG', 'Baidu-International', 'Symantec', 'PCTools',
"""
@staticmethod
def format(raw_result):
status = raw_result.get('status', -1)
if status != -1:
vt_result = raw_result.pop('results', {})
av_result = vt_result.get('results', {})
if status == 1:
# get ratios from virustotal results
nb_detect = av_result.get('positives', 0)
nb_total = av_result.get('total', 0)
raw_result['results'] = "detected by {0}/{1}" \
"".format(nb_detect, nb_total)
raw_result['external_url'] = av_result.get('permalink', None)
elif status == 0:
raw_result['results'] = av_result.get('verbose_msg', None)
return raw_result
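    # Illustrative sketch (not part of the original plugin): for a successful
    # scan, a raw result shaped like
    #
    #   {'status': 1, 'name': 'VirusTotal', 'type': IrmaProbeType.external,
    #    'results': {'results': {'positives': 3, 'total': 57,
    #                            'permalink': 'https://www.virustotal.com/...'}}}
    #
    # comes back with 'results' rewritten to "detected by 3/57" and the
    # permalink exposed as 'external_url'; when status == 0 the verbose
    # message is used instead.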
| {
"content_hash": "3b92c8fa4cc1dd4a0ce7ac9c7f871c05",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 78,
"avg_line_length": 39.660714285714285,
"alnum_prop": 0.5821701936064836,
"repo_name": "deloittem/irma-frontend",
"id": "9dd4f8f0983519a648fb44948ebbe90ec2cc7fa4",
"size": "2746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frontend/helpers/formatters/external/virustotal/virustotal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "229845"
},
{
"name": "DIGITAL Command Language",
"bytes": "68"
},
{
"name": "HTML",
"bytes": "24102"
},
{
"name": "JavaScript",
"bytes": "1773453"
},
{
"name": "Makefile",
"bytes": "92"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "261983"
},
{
"name": "Shell",
"bytes": "16816"
}
],
"symlink_target": ""
} |
from cms.api import create_page
from cms.models import Page
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.utils.unittest import skipIf
try:
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.common.exceptions import NoSuchElementException
from django.test import LiveServerTestCase
except ImportError:
from django.test import TestCase as LiveServerTestCase
WebDriver = NoSuchElementException = False
try:
# allow xvfb
from pyvirtualdisplay import Display
except ImportError:
Display = None
class CMSLiveTests(LiveServerTestCase):
@classmethod
def setUpClass(cls):
if Display:
cls.display = Display(visible=0, size=(800, 600))
cls.display.start()
if WebDriver:
cls.selenium = WebDriver()
super(CMSLiveTests, cls).setUpClass()
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'selenium'):
cls.selenium.quit()
if hasattr(cls, 'display'):
cls.display.stop()
super(CMSLiveTests, cls).tearDownClass()
def tearDown(self):
super(CMSLiveTests, self).tearDown()
Page.objects.all().delete() # not 100% sure why this is needed, but it is
def stop_server(self):
if hasattr(self, 'server_thread'):
self.server_thread.join()
def wait_until(self, callback, timeout=10):
"""
Helper function that blocks the execution of the tests until the
specified callback returns a value that is not falsy. This function can
be called, for example, after clicking a link or submitting a form.
See the other public methods that call this function for more details.
"""
from selenium.webdriver.support.wait import WebDriverWait
WebDriverWait(self.selenium, timeout).until(callback)
def wait_loaded_tag(self, tag_name, timeout=10):
"""
Helper function that blocks until the element with the given tag name
is found on the page.
"""
self.wait_until(
lambda driver: driver.find_element_by_tag_name(tag_name),
timeout
)
def wait_page_loaded(self):
"""
Block until page has started to load.
"""
from selenium.common.exceptions import TimeoutException
try:
# Wait for the next page to be loaded
self.wait_loaded_tag('body')
except TimeoutException:
# IE7 occasionnally returns an error "Internet Explorer cannot
# display the webpage" and doesn't load the next page. We just
# ignore it.
pass
class ToolbarBasicTests(CMSLiveTests):
def setUp(self):
Site.objects.create(domain='example.org', name='example.org')
@skipIf(not WebDriver, 'Selenium not found or Django too old')
def test_toolbar_login(self):
create_page('Home', 'simple.html', 'en', published=True).publish()
user = User()
user.username = 'admin'
user.set_password('admin')
user.is_superuser = user.is_staff = user.is_active = True
user.save()
url = '%s/?edit' % self.live_server_url
self.selenium.get(url)
self.assertRaises(NoSuchElementException, self.selenium.find_element_by_class_name, 'cms_toolbar-item_logout')
username_input = self.selenium.find_element_by_id("id_cms-username")
username_input.send_keys('admin')
password_input = self.selenium.find_element_by_id("id_cms-password")
password_input.send_keys('admin')
password_input.submit()
self.wait_page_loaded()
self.assertTrue(self.selenium.find_element_by_class_name('cms_toolbar-item-navigation'))
| {
"content_hash": "d68efc647bdc2fbb11043e9111669021",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 118,
"avg_line_length": 34.862385321100916,
"alnum_prop": 0.6510526315789473,
"repo_name": "pancentric/django-cms",
"id": "b2b541aa7211ab3de9de9a21752ad758fa7486c6",
"size": "3824",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "cms/tests/frontend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "60663"
},
{
"name": "JavaScript",
"bytes": "198877"
},
{
"name": "Python",
"bytes": "2557408"
},
{
"name": "Ruby",
"bytes": "990"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
} |
import pickle
from StringIO import StringIO
from test.test_support import verbose, run_unittest, findfile
import unittest
import xml.dom
import xml.dom.minidom
import xml.parsers.expat
from xml.dom.minidom import parse, Node, Document, parseString
from xml.dom.minidom import getDOMImplementation
tstfile = findfile("test.xml", subdir="xmltestdata")
# The tests of DocumentType importing use these helpers to construct
# the documents to work with, since not all DOM builders actually
# create the DocumentType nodes.
def create_doc_without_doctype(doctype=None):
return getDOMImplementation().createDocument(None, "doc", doctype)
def create_nonempty_doctype():
doctype = getDOMImplementation().createDocumentType("doc", None, None)
doctype.entities._seq = []
doctype.notations._seq = []
notation = xml.dom.minidom.Notation("my-notation", None,
"http://xml.python.org/notations/my")
doctype.notations._seq.append(notation)
entity = xml.dom.minidom.Entity("my-entity", None,
"http://xml.python.org/entities/my",
"my-notation")
entity.version = "1.0"
entity.encoding = "utf-8"
entity.actualEncoding = "us-ascii"
doctype.entities._seq.append(entity)
return doctype
def create_doc_with_doctype():
doctype = create_nonempty_doctype()
doc = create_doc_without_doctype(doctype)
doctype.entities.item(0).ownerDocument = doc
doctype.notations.item(0).ownerDocument = doc
return doc
class MinidomTest(unittest.TestCase):
def confirm(self, test, testname = "Test"):
self.assertTrue(test, testname)
def checkWholeText(self, node, s):
t = node.wholeText
self.confirm(t == s, "looking for %s, found %s" % (repr(s), repr(t)))
def testParseFromFile(self):
dom = parse(StringIO(open(tstfile).read()))
dom.unlink()
self.confirm(isinstance(dom,Document))
def testGetElementsByTagName(self):
dom = parse(tstfile)
self.confirm(dom.getElementsByTagName("LI") == \
dom.documentElement.getElementsByTagName("LI"))
dom.unlink()
def testInsertBefore(self):
dom = parseString("<doc><foo/></doc>")
root = dom.documentElement
elem = root.childNodes[0]
nelem = dom.createElement("element")
root.insertBefore(nelem, elem)
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2
and root.childNodes[0] is nelem
and root.childNodes.item(0) is nelem
and root.childNodes[1] is elem
and root.childNodes.item(1) is elem
and root.firstChild is nelem
and root.lastChild is elem
and root.toxml() == "<doc><element/><foo/></doc>"
, "testInsertBefore -- node properly placed in tree")
nelem = dom.createElement("element")
root.insertBefore(nelem, None)
self.confirm(len(root.childNodes) == 3
and root.childNodes.length == 3
and root.childNodes[1] is elem
and root.childNodes.item(1) is elem
and root.childNodes[2] is nelem
and root.childNodes.item(2) is nelem
and root.lastChild is nelem
and nelem.previousSibling is elem
and root.toxml() == "<doc><element/><foo/><element/></doc>"
, "testInsertBefore -- node properly placed in tree")
nelem2 = dom.createElement("bar")
root.insertBefore(nelem2, nelem)
self.confirm(len(root.childNodes) == 4
and root.childNodes.length == 4
and root.childNodes[2] is nelem2
and root.childNodes.item(2) is nelem2
and root.childNodes[3] is nelem
and root.childNodes.item(3) is nelem
and nelem2.nextSibling is nelem
and nelem.previousSibling is nelem2
and root.toxml() ==
"<doc><element/><foo/><bar/><element/></doc>"
, "testInsertBefore -- node properly placed in tree")
dom.unlink()
def _create_fragment_test_nodes(self):
dom = parseString("<doc/>")
orig = dom.createTextNode("original")
c1 = dom.createTextNode("foo")
c2 = dom.createTextNode("bar")
c3 = dom.createTextNode("bat")
dom.documentElement.appendChild(orig)
frag = dom.createDocumentFragment()
frag.appendChild(c1)
frag.appendChild(c2)
frag.appendChild(c3)
return dom, orig, c1, c2, c3, frag
def testInsertBeforeFragment(self):
dom, orig, c1, c2, c3, frag = self._create_fragment_test_nodes()
dom.documentElement.insertBefore(frag, None)
self.confirm(tuple(dom.documentElement.childNodes) ==
(orig, c1, c2, c3),
"insertBefore(<fragment>, None)")
frag.unlink()
dom.unlink()
dom, orig, c1, c2, c3, frag = self._create_fragment_test_nodes()
dom.documentElement.insertBefore(frag, orig)
self.confirm(tuple(dom.documentElement.childNodes) ==
(c1, c2, c3, orig),
"insertBefore(<fragment>, orig)")
frag.unlink()
dom.unlink()
def testAppendChild(self):
dom = parse(tstfile)
dom.documentElement.appendChild(dom.createComment(u"Hello"))
self.confirm(dom.documentElement.childNodes[-1].nodeName == "#comment")
self.confirm(dom.documentElement.childNodes[-1].data == "Hello")
dom.unlink()
def testAppendChildFragment(self):
dom, orig, c1, c2, c3, frag = self._create_fragment_test_nodes()
dom.documentElement.appendChild(frag)
self.confirm(tuple(dom.documentElement.childNodes) ==
(orig, c1, c2, c3),
"appendChild(<fragment>)")
frag.unlink()
dom.unlink()
def testReplaceChildFragment(self):
dom, orig, c1, c2, c3, frag = self._create_fragment_test_nodes()
dom.documentElement.replaceChild(frag, orig)
orig.unlink()
self.confirm(tuple(dom.documentElement.childNodes) == (c1, c2, c3),
"replaceChild(<fragment>)")
frag.unlink()
dom.unlink()
def testLegalChildren(self):
dom = Document()
elem = dom.createElement('element')
text = dom.createTextNode('text')
self.assertRaises(xml.dom.HierarchyRequestErr, dom.appendChild, text)
dom.appendChild(elem)
self.assertRaises(xml.dom.HierarchyRequestErr, dom.insertBefore, text,
elem)
self.assertRaises(xml.dom.HierarchyRequestErr, dom.replaceChild, text,
elem)
nodemap = elem.attributes
self.assertRaises(xml.dom.HierarchyRequestErr, nodemap.setNamedItem,
text)
self.assertRaises(xml.dom.HierarchyRequestErr, nodemap.setNamedItemNS,
text)
elem.appendChild(text)
dom.unlink()
def testNamedNodeMapSetItem(self):
dom = Document()
elem = dom.createElement('element')
attrs = elem.attributes
attrs["foo"] = "bar"
a = attrs.item(0)
self.confirm(a.ownerDocument is dom,
"NamedNodeMap.__setitem__() sets ownerDocument")
self.confirm(a.ownerElement is elem,
"NamedNodeMap.__setitem__() sets ownerElement")
self.confirm(a.value == "bar",
"NamedNodeMap.__setitem__() sets value")
self.confirm(a.nodeValue == "bar",
"NamedNodeMap.__setitem__() sets nodeValue")
elem.unlink()
dom.unlink()
def testNonZero(self):
dom = parse(tstfile)
self.confirm(dom)# should not be zero
dom.appendChild(dom.createComment("foo"))
self.confirm(not dom.childNodes[-1].childNodes)
dom.unlink()
def testUnlink(self):
dom = parse(tstfile)
dom.unlink()
def testElement(self):
dom = Document()
dom.appendChild(dom.createElement("abc"))
self.confirm(dom.documentElement)
dom.unlink()
def testAAA(self):
dom = parseString("<abc/>")
el = dom.documentElement
el.setAttribute("spam", "jam2")
self.confirm(el.toxml() == '<abc spam="jam2"/>', "testAAA")
a = el.getAttributeNode("spam")
self.confirm(a.ownerDocument is dom,
"setAttribute() sets ownerDocument")
self.confirm(a.ownerElement is dom.documentElement,
"setAttribute() sets ownerElement")
dom.unlink()
def testAAB(self):
dom = parseString("<abc/>")
el = dom.documentElement
el.setAttribute("spam", "jam")
el.setAttribute("spam", "jam2")
self.confirm(el.toxml() == '<abc spam="jam2"/>', "testAAB")
dom.unlink()
def testAddAttr(self):
dom = Document()
child = dom.appendChild(dom.createElement("abc"))
child.setAttribute("def", "ghi")
self.confirm(child.getAttribute("def") == "ghi")
self.confirm(child.attributes["def"].value == "ghi")
child.setAttribute("jkl", "mno")
self.confirm(child.getAttribute("jkl") == "mno")
self.confirm(child.attributes["jkl"].value == "mno")
self.confirm(len(child.attributes) == 2)
child.setAttribute("def", "newval")
self.confirm(child.getAttribute("def") == "newval")
self.confirm(child.attributes["def"].value == "newval")
self.confirm(len(child.attributes) == 2)
dom.unlink()
def testDeleteAttr(self):
dom = Document()
child = dom.appendChild(dom.createElement("abc"))
self.confirm(len(child.attributes) == 0)
child.setAttribute("def", "ghi")
self.confirm(len(child.attributes) == 1)
del child.attributes["def"]
self.confirm(len(child.attributes) == 0)
dom.unlink()
def testRemoveAttr(self):
dom = Document()
child = dom.appendChild(dom.createElement("abc"))
child.setAttribute("def", "ghi")
self.confirm(len(child.attributes) == 1)
child.removeAttribute("def")
self.confirm(len(child.attributes) == 0)
dom.unlink()
def testRemoveAttrNS(self):
dom = Document()
child = dom.appendChild(
dom.createElementNS("http://www.python.org", "python:abc"))
child.setAttributeNS("http://www.w3.org", "xmlns:python",
"http://www.python.org")
child.setAttributeNS("http://www.python.org", "python:abcattr", "foo")
self.confirm(len(child.attributes) == 2)
child.removeAttributeNS("http://www.python.org", "abcattr")
self.confirm(len(child.attributes) == 1)
dom.unlink()
def testRemoveAttributeNode(self):
dom = Document()
child = dom.appendChild(dom.createElement("foo"))
child.setAttribute("spam", "jam")
self.confirm(len(child.attributes) == 1)
node = child.getAttributeNode("spam")
child.removeAttributeNode(node)
self.confirm(len(child.attributes) == 0
and child.getAttributeNode("spam") is None)
dom.unlink()
def testChangeAttr(self):
dom = parseString("<abc/>")
el = dom.documentElement
el.setAttribute("spam", "jam")
self.confirm(len(el.attributes) == 1)
el.setAttribute("spam", "bam")
# Set this attribute to be an ID and make sure that doesn't change
# when changing the value:
el.setIdAttribute("spam")
self.confirm(len(el.attributes) == 1
and el.attributes["spam"].value == "bam"
and el.attributes["spam"].nodeValue == "bam"
and el.getAttribute("spam") == "bam"
and el.getAttributeNode("spam").isId)
el.attributes["spam"] = "ham"
self.confirm(len(el.attributes) == 1
and el.attributes["spam"].value == "ham"
and el.attributes["spam"].nodeValue == "ham"
and el.getAttribute("spam") == "ham"
and el.attributes["spam"].isId)
el.setAttribute("spam2", "bam")
self.confirm(len(el.attributes) == 2
and el.attributes["spam"].value == "ham"
and el.attributes["spam"].nodeValue == "ham"
and el.getAttribute("spam") == "ham"
and el.attributes["spam2"].value == "bam"
and el.attributes["spam2"].nodeValue == "bam"
and el.getAttribute("spam2") == "bam")
el.attributes["spam2"] = "bam2"
self.confirm(len(el.attributes) == 2
and el.attributes["spam"].value == "ham"
and el.attributes["spam"].nodeValue == "ham"
and el.getAttribute("spam") == "ham"
and el.attributes["spam2"].value == "bam2"
and el.attributes["spam2"].nodeValue == "bam2"
and el.getAttribute("spam2") == "bam2")
dom.unlink()
def testGetElementsByTagNameNS(self):
d="""<foo xmlns:minidom='http://pyxml.sf.net/minidom'>
<minidom:myelem/>
</foo>"""
dom = parseString(d)
elems = dom.getElementsByTagNameNS("http://pyxml.sf.net/minidom",
"myelem")
self.confirm(len(elems) == 1
and elems[0].namespaceURI == "http://pyxml.sf.net/minidom"
and elems[0].localName == "myelem"
and elems[0].prefix == "minidom"
and elems[0].tagName == "minidom:myelem"
and elems[0].nodeName == "minidom:myelem")
dom.unlink()
def get_empty_nodelist_from_elements_by_tagName_ns_helper(self, doc, nsuri,
lname):
nodelist = doc.getElementsByTagNameNS(nsuri, lname)
self.confirm(len(nodelist) == 0)
def testGetEmptyNodeListFromElementsByTagNameNS(self):
doc = parseString('<doc/>')
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, 'http://xml.python.org/namespaces/a', 'localname')
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, '*', 'splat')
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, 'http://xml.python.org/namespaces/a', '*')
doc = parseString('<doc xmlns="http://xml.python.org/splat"><e/></doc>')
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, "http://xml.python.org/splat", "not-there")
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, "*", "not-there")
self.get_empty_nodelist_from_elements_by_tagName_ns_helper(
doc, "http://somewhere.else.net/not-there", "e")
def testElementReprAndStr(self):
dom = Document()
el = dom.appendChild(dom.createElement("abc"))
string1 = repr(el)
string2 = str(el)
self.confirm(string1 == string2)
dom.unlink()
def testElementReprAndStrUnicode(self):
dom = Document()
el = dom.appendChild(dom.createElement(u"abc"))
string1 = repr(el)
string2 = str(el)
self.confirm(string1 == string2)
dom.unlink()
def testElementReprAndStrUnicodeNS(self):
dom = Document()
el = dom.appendChild(
dom.createElementNS(u"http://www.slashdot.org", u"slash:abc"))
string1 = repr(el)
string2 = str(el)
self.confirm(string1 == string2)
self.confirm("slash:abc" in string1)
dom.unlink()
def testAttributeRepr(self):
dom = Document()
el = dom.appendChild(dom.createElement(u"abc"))
node = el.setAttribute("abc", "def")
self.confirm(str(node) == repr(node))
dom.unlink()
def testWriteXML(self):
str = '<?xml version="1.0" ?><a b="c"/>'
dom = parseString(str)
domstr = dom.toxml()
dom.unlink()
self.confirm(str == domstr)
def testAltNewline(self):
str = '<?xml version="1.0" ?>\n<a b="c"/>\n'
dom = parseString(str)
domstr = dom.toprettyxml(newl="\r\n")
dom.unlink()
self.confirm(domstr == str.replace("\n", "\r\n"))
def test_toprettyxml_with_text_nodes(self):
# see issue #4147, text nodes are not indented
decl = '<?xml version="1.0" ?>\n'
self.assertEqual(parseString('<B>A</B>').toprettyxml(),
decl + '<B>A</B>\n')
self.assertEqual(parseString('<C>A<B>A</B></C>').toprettyxml(),
decl + '<C>\n\tA\n\t<B>A</B>\n</C>\n')
self.assertEqual(parseString('<C><B>A</B>A</C>').toprettyxml(),
decl + '<C>\n\t<B>A</B>\n\tA\n</C>\n')
self.assertEqual(parseString('<C><B>A</B><B>A</B></C>').toprettyxml(),
decl + '<C>\n\t<B>A</B>\n\t<B>A</B>\n</C>\n')
self.assertEqual(parseString('<C><B>A</B>A<B>A</B></C>').toprettyxml(),
decl + '<C>\n\t<B>A</B>\n\tA\n\t<B>A</B>\n</C>\n')
def test_toprettyxml_with_adjacent_text_nodes(self):
# see issue #4147, adjacent text nodes are indented normally
dom = Document()
elem = dom.createElement(u'elem')
elem.appendChild(dom.createTextNode(u'TEXT'))
elem.appendChild(dom.createTextNode(u'TEXT'))
dom.appendChild(elem)
decl = '<?xml version="1.0" ?>\n'
self.assertEqual(dom.toprettyxml(),
decl + '<elem>\n\tTEXT\n\tTEXT\n</elem>\n')
def test_toprettyxml_preserves_content_of_text_node(self):
# see issue #4147
for str in ('<B>A</B>', '<A><B>C</B></A>'):
dom = parseString(str)
dom2 = parseString(dom.toprettyxml())
self.assertEqual(
dom.getElementsByTagName('B')[0].childNodes[0].toxml(),
dom2.getElementsByTagName('B')[0].childNodes[0].toxml())
def testProcessingInstruction(self):
dom = parseString('<e><?mypi \t\n data \t\n ?></e>')
pi = dom.documentElement.firstChild
self.confirm(pi.target == "mypi"
and pi.data == "data \t\n "
and pi.nodeName == "mypi"
and pi.nodeType == Node.PROCESSING_INSTRUCTION_NODE
and pi.attributes is None
and not pi.hasChildNodes()
and len(pi.childNodes) == 0
and pi.firstChild is None
and pi.lastChild is None
and pi.localName is None
and pi.namespaceURI == xml.dom.EMPTY_NAMESPACE)
def testTooManyDocumentElements(self):
doc = parseString("<doc/>")
elem = doc.createElement("extra")
# Should raise an exception when adding an extra document element.
self.assertRaises(xml.dom.HierarchyRequestErr, doc.appendChild, elem)
elem.unlink()
doc.unlink()
def testRemoveNamedItem(self):
doc = parseString("<doc a=''/>")
e = doc.documentElement
attrs = e.attributes
a1 = e.getAttributeNode("a")
a2 = attrs.removeNamedItem("a")
self.confirm(a1.isSameNode(a2))
self.assertRaises(xml.dom.NotFoundErr, attrs.removeNamedItem, "a")
def testRemoveNamedItemNS(self):
doc = parseString("<doc xmlns:a='http://xml.python.org/' a:b=''/>")
e = doc.documentElement
attrs = e.attributes
a1 = e.getAttributeNodeNS("http://xml.python.org/", "b")
a2 = attrs.removeNamedItemNS("http://xml.python.org/", "b")
self.confirm(a1.isSameNode(a2))
self.assertRaises(xml.dom.NotFoundErr, attrs.removeNamedItemNS,
"http://xml.python.org/", "b")
def _testCloneElementCopiesAttributes(self, e1, e2, test):
attrs1 = e1.attributes
attrs2 = e2.attributes
keys1 = attrs1.keys()
keys2 = attrs2.keys()
keys1.sort()
keys2.sort()
self.confirm(keys1 == keys2, "clone of element has same attribute keys")
for i in range(len(keys1)):
a1 = attrs1.item(i)
a2 = attrs2.item(i)
self.confirm(a1 is not a2
and a1.value == a2.value
and a1.nodeValue == a2.nodeValue
and a1.namespaceURI == a2.namespaceURI
and a1.localName == a2.localName
, "clone of attribute node has proper attribute values")
self.confirm(a2.ownerElement is e2,
"clone of attribute node correctly owned")
def _setupCloneElement(self, deep):
dom = parseString("<doc attr='value'><foo/></doc>")
root = dom.documentElement
clone = root.cloneNode(deep)
self._testCloneElementCopiesAttributes(
root, clone, "testCloneElement" + (deep and "Deep" or "Shallow"))
# mutilate the original so shared data is detected
root.tagName = root.nodeName = "MODIFIED"
root.setAttribute("attr", "NEW VALUE")
root.setAttribute("added", "VALUE")
return dom, clone
def testCloneElementShallow(self):
dom, clone = self._setupCloneElement(0)
self.confirm(len(clone.childNodes) == 0
and clone.childNodes.length == 0
and clone.parentNode is None
and clone.toxml() == '<doc attr="value"/>'
, "testCloneElementShallow")
dom.unlink()
def testCloneElementDeep(self):
dom, clone = self._setupCloneElement(1)
self.confirm(len(clone.childNodes) == 1
and clone.childNodes.length == 1
and clone.parentNode is None
and clone.toxml() == '<doc attr="value"><foo/></doc>'
, "testCloneElementDeep")
dom.unlink()
def testCloneDocumentShallow(self):
doc = parseString("<?xml version='1.0'?>\n"
"<!-- comment -->"
"<!DOCTYPE doc [\n"
"<!NOTATION notation SYSTEM 'http://xml.python.org/'>\n"
"]>\n"
"<doc attr='value'/>")
doc2 = doc.cloneNode(0)
self.confirm(doc2 is None,
"testCloneDocumentShallow:"
" shallow cloning of documents makes no sense!")
def testCloneDocumentDeep(self):
doc = parseString("<?xml version='1.0'?>\n"
"<!-- comment -->"
"<!DOCTYPE doc [\n"
"<!NOTATION notation SYSTEM 'http://xml.python.org/'>\n"
"]>\n"
"<doc attr='value'/>")
doc2 = doc.cloneNode(1)
self.confirm(not (doc.isSameNode(doc2) or doc2.isSameNode(doc)),
"testCloneDocumentDeep: document objects not distinct")
self.confirm(len(doc.childNodes) == len(doc2.childNodes),
"testCloneDocumentDeep: wrong number of Document children")
self.confirm(doc2.documentElement.nodeType == Node.ELEMENT_NODE,
"testCloneDocumentDeep: documentElement not an ELEMENT_NODE")
self.confirm(doc2.documentElement.ownerDocument.isSameNode(doc2),
"testCloneDocumentDeep: documentElement owner is not new document")
self.confirm(not doc.documentElement.isSameNode(doc2.documentElement),
"testCloneDocumentDeep: documentElement should not be shared")
if doc.doctype is not None:
# check the doctype iff the original DOM maintained it
self.confirm(doc2.doctype.nodeType == Node.DOCUMENT_TYPE_NODE,
"testCloneDocumentDeep: doctype not a DOCUMENT_TYPE_NODE")
self.confirm(doc2.doctype.ownerDocument.isSameNode(doc2))
self.confirm(not doc.doctype.isSameNode(doc2.doctype))
def testCloneDocumentTypeDeepOk(self):
doctype = create_nonempty_doctype()
clone = doctype.cloneNode(1)
self.confirm(clone is not None
and clone.nodeName == doctype.nodeName
and clone.name == doctype.name
and clone.publicId == doctype.publicId
and clone.systemId == doctype.systemId
and len(clone.entities) == len(doctype.entities)
and clone.entities.item(len(clone.entities)) is None
and len(clone.notations) == len(doctype.notations)
and clone.notations.item(len(clone.notations)) is None
and len(clone.childNodes) == 0)
for i in range(len(doctype.entities)):
se = doctype.entities.item(i)
ce = clone.entities.item(i)
self.confirm((not se.isSameNode(ce))
and (not ce.isSameNode(se))
and ce.nodeName == se.nodeName
and ce.notationName == se.notationName
and ce.publicId == se.publicId
and ce.systemId == se.systemId
and ce.encoding == se.encoding
and ce.actualEncoding == se.actualEncoding
and ce.version == se.version)
for i in range(len(doctype.notations)):
sn = doctype.notations.item(i)
cn = clone.notations.item(i)
self.confirm((not sn.isSameNode(cn))
and (not cn.isSameNode(sn))
and cn.nodeName == sn.nodeName
and cn.publicId == sn.publicId
and cn.systemId == sn.systemId)
def testCloneDocumentTypeDeepNotOk(self):
doc = create_doc_with_doctype()
clone = doc.doctype.cloneNode(1)
self.confirm(clone is None, "testCloneDocumentTypeDeepNotOk")
def testCloneDocumentTypeShallowOk(self):
doctype = create_nonempty_doctype()
clone = doctype.cloneNode(0)
self.confirm(clone is not None
and clone.nodeName == doctype.nodeName
and clone.name == doctype.name
and clone.publicId == doctype.publicId
and clone.systemId == doctype.systemId
and len(clone.entities) == 0
and clone.entities.item(0) is None
and len(clone.notations) == 0
and clone.notations.item(0) is None
and len(clone.childNodes) == 0)
def testCloneDocumentTypeShallowNotOk(self):
doc = create_doc_with_doctype()
clone = doc.doctype.cloneNode(0)
self.confirm(clone is None, "testCloneDocumentTypeShallowNotOk")
def check_import_document(self, deep, testName):
doc1 = parseString("<doc/>")
doc2 = parseString("<doc/>")
self.assertRaises(xml.dom.NotSupportedErr, doc1.importNode, doc2, deep)
def testImportDocumentShallow(self):
self.check_import_document(0, "testImportDocumentShallow")
def testImportDocumentDeep(self):
self.check_import_document(1, "testImportDocumentDeep")
def testImportDocumentTypeShallow(self):
src = create_doc_with_doctype()
target = create_doc_without_doctype()
self.assertRaises(xml.dom.NotSupportedErr, target.importNode,
src.doctype, 0)
def testImportDocumentTypeDeep(self):
src = create_doc_with_doctype()
target = create_doc_without_doctype()
self.assertRaises(xml.dom.NotSupportedErr, target.importNode,
src.doctype, 1)
# Testing attribute clones uses a helper, and should always be deep,
# even if the argument to cloneNode is false.
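    # Editorial illustration of the rule above (assuming minidom follows the DOM
    # requirement that Attr clones always copy their value), e.g.
    #   attr = parseString("<doc a='v'/>").documentElement.getAttributeNode("a")
    #   attr.cloneNode(0).value == "v" and attr.cloneNode(1).value == "v"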
def check_clone_attribute(self, deep, testName):
doc = parseString("<doc attr='value'/>")
attr = doc.documentElement.getAttributeNode("attr")
self.assertNotEqual(attr, None)
clone = attr.cloneNode(deep)
self.confirm(not clone.isSameNode(attr))
self.confirm(not attr.isSameNode(clone))
self.confirm(clone.ownerElement is None,
testName + ": ownerElement should be None")
self.confirm(clone.ownerDocument.isSameNode(attr.ownerDocument),
testName + ": ownerDocument does not match")
self.confirm(clone.specified,
testName + ": cloned attribute must have specified == True")
def testCloneAttributeShallow(self):
self.check_clone_attribute(0, "testCloneAttributeShallow")
def testCloneAttributeDeep(self):
self.check_clone_attribute(1, "testCloneAttributeDeep")
def check_clone_pi(self, deep, testName):
doc = parseString("<?target data?><doc/>")
pi = doc.firstChild
self.assertEqual(pi.nodeType, Node.PROCESSING_INSTRUCTION_NODE)
clone = pi.cloneNode(deep)
self.confirm(clone.target == pi.target
and clone.data == pi.data)
def testClonePIShallow(self):
self.check_clone_pi(0, "testClonePIShallow")
def testClonePIDeep(self):
self.check_clone_pi(1, "testClonePIDeep")
def testNormalize(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode("first"))
root.appendChild(doc.createTextNode("second"))
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2,
"testNormalize -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 1
and root.childNodes.length == 1
and root.firstChild is root.lastChild
and root.firstChild.data == "firstsecond"
, "testNormalize -- result")
doc.unlink()
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode(""))
doc.normalize()
self.confirm(len(root.childNodes) == 0
and root.childNodes.length == 0,
"testNormalize -- single empty node removed")
doc.unlink()
def testNormalizeCombineAndNextSibling(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode("first"))
root.appendChild(doc.createTextNode("second"))
root.appendChild(doc.createElement("i"))
self.confirm(len(root.childNodes) == 3
and root.childNodes.length == 3,
"testNormalizeCombineAndNextSibling -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2
and root.firstChild.data == "firstsecond"
and root.firstChild is not root.lastChild
and root.firstChild.nextSibling is root.lastChild
and root.firstChild.previousSibling is None
and root.lastChild.previousSibling is root.firstChild
and root.lastChild.nextSibling is None
, "testNormalizeCombinedAndNextSibling -- result")
doc.unlink()
def testNormalizeDeleteWithPrevSibling(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode("first"))
root.appendChild(doc.createTextNode(""))
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2,
"testNormalizeDeleteWithPrevSibling -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 1
and root.childNodes.length == 1
and root.firstChild.data == "first"
and root.firstChild is root.lastChild
and root.firstChild.nextSibling is None
and root.firstChild.previousSibling is None
, "testNormalizeDeleteWithPrevSibling -- result")
doc.unlink()
def testNormalizeDeleteWithNextSibling(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode(""))
root.appendChild(doc.createTextNode("second"))
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2,
"testNormalizeDeleteWithNextSibling -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 1
and root.childNodes.length == 1
and root.firstChild.data == "second"
and root.firstChild is root.lastChild
and root.firstChild.nextSibling is None
and root.firstChild.previousSibling is None
, "testNormalizeDeleteWithNextSibling -- result")
doc.unlink()
def testNormalizeDeleteWithTwoNonTextSiblings(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createElement("i"))
root.appendChild(doc.createTextNode(""))
root.appendChild(doc.createElement("i"))
self.confirm(len(root.childNodes) == 3
and root.childNodes.length == 3,
"testNormalizeDeleteWithTwoSiblings -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2
and root.firstChild is not root.lastChild
and root.firstChild.nextSibling is root.lastChild
and root.firstChild.previousSibling is None
and root.lastChild.previousSibling is root.firstChild
and root.lastChild.nextSibling is None
, "testNormalizeDeleteWithTwoSiblings -- result")
doc.unlink()
def testNormalizeDeleteAndCombine(self):
doc = parseString("<doc/>")
root = doc.documentElement
root.appendChild(doc.createTextNode(""))
root.appendChild(doc.createTextNode("second"))
root.appendChild(doc.createTextNode(""))
root.appendChild(doc.createTextNode("fourth"))
root.appendChild(doc.createTextNode(""))
self.confirm(len(root.childNodes) == 5
and root.childNodes.length == 5,
"testNormalizeDeleteAndCombine -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 1
and root.childNodes.length == 1
and root.firstChild is root.lastChild
and root.firstChild.data == "secondfourth"
and root.firstChild.previousSibling is None
and root.firstChild.nextSibling is None
, "testNormalizeDeleteAndCombine -- result")
doc.unlink()
def testNormalizeRecursion(self):
doc = parseString("<doc>"
"<o>"
"<i/>"
"t"
#
#x
"</o>"
"<o>"
"<o>"
"t2"
#x2
"</o>"
"t3"
#x3
"</o>"
#
"</doc>")
root = doc.documentElement
root.childNodes[0].appendChild(doc.createTextNode(""))
root.childNodes[0].appendChild(doc.createTextNode("x"))
root.childNodes[1].childNodes[0].appendChild(doc.createTextNode("x2"))
root.childNodes[1].appendChild(doc.createTextNode("x3"))
root.appendChild(doc.createTextNode(""))
self.confirm(len(root.childNodes) == 3
and root.childNodes.length == 3
and len(root.childNodes[0].childNodes) == 4
and root.childNodes[0].childNodes.length == 4
and len(root.childNodes[1].childNodes) == 3
and root.childNodes[1].childNodes.length == 3
and len(root.childNodes[1].childNodes[0].childNodes) == 2
and root.childNodes[1].childNodes[0].childNodes.length == 2
, "testNormalize2 -- preparation")
doc.normalize()
self.confirm(len(root.childNodes) == 2
and root.childNodes.length == 2
and len(root.childNodes[0].childNodes) == 2
and root.childNodes[0].childNodes.length == 2
and len(root.childNodes[1].childNodes) == 2
and root.childNodes[1].childNodes.length == 2
and len(root.childNodes[1].childNodes[0].childNodes) == 1
and root.childNodes[1].childNodes[0].childNodes.length == 1
, "testNormalize2 -- childNodes lengths")
self.confirm(root.childNodes[0].childNodes[1].data == "tx"
and root.childNodes[1].childNodes[0].childNodes[0].data == "t2x2"
and root.childNodes[1].childNodes[1].data == "t3x3"
, "testNormalize2 -- joined text fields")
self.confirm(root.childNodes[0].childNodes[1].nextSibling is None
and root.childNodes[0].childNodes[1].previousSibling
is root.childNodes[0].childNodes[0]
and root.childNodes[0].childNodes[0].previousSibling is None
and root.childNodes[0].childNodes[0].nextSibling
is root.childNodes[0].childNodes[1]
and root.childNodes[1].childNodes[1].nextSibling is None
and root.childNodes[1].childNodes[1].previousSibling
is root.childNodes[1].childNodes[0]
and root.childNodes[1].childNodes[0].previousSibling is None
and root.childNodes[1].childNodes[0].nextSibling
is root.childNodes[1].childNodes[1]
, "testNormalize2 -- sibling pointers")
doc.unlink()
def testBug0777884(self):
doc = parseString("<o>text</o>")
text = doc.documentElement.childNodes[0]
self.assertEqual(text.nodeType, Node.TEXT_NODE)
# Should run quietly, doing nothing.
text.normalize()
doc.unlink()
def testBug1433694(self):
doc = parseString("<o><i/>t</o>")
node = doc.documentElement
node.childNodes[1].nodeValue = ""
node.normalize()
self.confirm(node.childNodes[-1].nextSibling is None,
"Final child's .nextSibling should be None")
def testSiblings(self):
doc = parseString("<doc><?pi?>text?<elm/></doc>")
root = doc.documentElement
(pi, text, elm) = root.childNodes
self.confirm(pi.nextSibling is text and
pi.previousSibling is None and
text.nextSibling is elm and
text.previousSibling is pi and
elm.nextSibling is None and
elm.previousSibling is text, "testSiblings")
doc.unlink()
def testParents(self):
doc = parseString(
"<doc><elm1><elm2/><elm2><elm3/></elm2></elm1></doc>")
root = doc.documentElement
elm1 = root.childNodes[0]
(elm2a, elm2b) = elm1.childNodes
elm3 = elm2b.childNodes[0]
self.confirm(root.parentNode is doc and
elm1.parentNode is root and
elm2a.parentNode is elm1 and
elm2b.parentNode is elm1 and
elm3.parentNode is elm2b, "testParents")
doc.unlink()
def testNodeListItem(self):
doc = parseString("<doc><e/><e/></doc>")
children = doc.childNodes
docelem = children[0]
self.confirm(children[0] is children.item(0)
and children.item(1) is None
and docelem.childNodes.item(0) is docelem.childNodes[0]
and docelem.childNodes.item(1) is docelem.childNodes[1]
and docelem.childNodes.item(0).childNodes.item(0) is None,
"test NodeList.item()")
doc.unlink()
def testSAX2DOM(self):
from xml.dom import pulldom
sax2dom = pulldom.SAX2DOM()
sax2dom.startDocument()
sax2dom.startElement("doc", {})
sax2dom.characters("text")
sax2dom.startElement("subelm", {})
sax2dom.characters("text")
sax2dom.endElement("subelm")
sax2dom.characters("text")
sax2dom.endElement("doc")
sax2dom.endDocument()
doc = sax2dom.document
root = doc.documentElement
(text1, elm1, text2) = root.childNodes
text3 = elm1.childNodes[0]
self.confirm(text1.previousSibling is None and
text1.nextSibling is elm1 and
elm1.previousSibling is text1 and
elm1.nextSibling is text2 and
text2.previousSibling is elm1 and
text2.nextSibling is None and
text3.previousSibling is None and
text3.nextSibling is None, "testSAX2DOM - siblings")
self.confirm(root.parentNode is doc and
text1.parentNode is root and
elm1.parentNode is root and
text2.parentNode is root and
text3.parentNode is elm1, "testSAX2DOM - parents")
doc.unlink()
def testEncodings(self):
doc = parseString('<foo>€</foo>')
self.confirm(doc.toxml() == u'<?xml version="1.0" ?><foo>\u20ac</foo>'
and doc.toxml('utf-8') ==
'<?xml version="1.0" encoding="utf-8"?><foo>\xe2\x82\xac</foo>'
and doc.toxml('iso-8859-15') ==
'<?xml version="1.0" encoding="iso-8859-15"?><foo>\xa4</foo>',
"testEncodings - encoding EURO SIGN")
# Verify that character decoding errors raise exceptions instead
# of crashing
self.assertRaises(UnicodeDecodeError, parseString,
'<fran\xe7ais>Comment \xe7a va ? Tr\xe8s bien ?</fran\xe7ais>')
doc.unlink()
class UserDataHandler:
called = 0
def handle(self, operation, key, data, src, dst):
dst.setUserData(key, data + 1, self)
src.setUserData(key, None, None)
self.called = 1
def testUserData(self):
dom = Document()
n = dom.createElement('e')
self.confirm(n.getUserData("foo") is None)
n.setUserData("foo", None, None)
self.confirm(n.getUserData("foo") is None)
n.setUserData("foo", 12, 12)
n.setUserData("bar", 13, 13)
self.confirm(n.getUserData("foo") == 12)
self.confirm(n.getUserData("bar") == 13)
n.setUserData("foo", None, None)
self.confirm(n.getUserData("foo") is None)
self.confirm(n.getUserData("bar") == 13)
handler = self.UserDataHandler()
n.setUserData("bar", 12, handler)
c = n.cloneNode(1)
self.confirm(handler.called
and n.getUserData("bar") is None
and c.getUserData("bar") == 13)
n.unlink()
c.unlink()
dom.unlink()
def checkRenameNodeSharedConstraints(self, doc, node):
# Make sure illegal NS usage is detected:
self.assertRaises(xml.dom.NamespaceErr, doc.renameNode, node,
"http://xml.python.org/ns", "xmlns:foo")
doc2 = parseString("<doc/>")
self.assertRaises(xml.dom.WrongDocumentErr, doc2.renameNode, node,
xml.dom.EMPTY_NAMESPACE, "foo")
def testRenameAttribute(self):
doc = parseString("<doc a='v'/>")
elem = doc.documentElement
attrmap = elem.attributes
attr = elem.attributes['a']
# Simple renaming
attr = doc.renameNode(attr, xml.dom.EMPTY_NAMESPACE, "b")
self.confirm(attr.name == "b"
and attr.nodeName == "b"
and attr.localName is None
and attr.namespaceURI == xml.dom.EMPTY_NAMESPACE
and attr.prefix is None
and attr.value == "v"
and elem.getAttributeNode("a") is None
and elem.getAttributeNode("b").isSameNode(attr)
and attrmap["b"].isSameNode(attr)
and attr.ownerDocument.isSameNode(doc)
and attr.ownerElement.isSameNode(elem))
# Rename to have a namespace, no prefix
attr = doc.renameNode(attr, "http://xml.python.org/ns", "c")
self.confirm(attr.name == "c"
and attr.nodeName == "c"
and attr.localName == "c"
and attr.namespaceURI == "http://xml.python.org/ns"
and attr.prefix is None
and attr.value == "v"
and elem.getAttributeNode("a") is None
and elem.getAttributeNode("b") is None
and elem.getAttributeNode("c").isSameNode(attr)
and elem.getAttributeNodeNS(
"http://xml.python.org/ns", "c").isSameNode(attr)
and attrmap["c"].isSameNode(attr)
and attrmap[("http://xml.python.org/ns", "c")].isSameNode(attr))
# Rename to have a namespace, with prefix
attr = doc.renameNode(attr, "http://xml.python.org/ns2", "p:d")
self.confirm(attr.name == "p:d"
and attr.nodeName == "p:d"
and attr.localName == "d"
and attr.namespaceURI == "http://xml.python.org/ns2"
and attr.prefix == "p"
and attr.value == "v"
and elem.getAttributeNode("a") is None
and elem.getAttributeNode("b") is None
and elem.getAttributeNode("c") is None
and elem.getAttributeNodeNS(
"http://xml.python.org/ns", "c") is None
and elem.getAttributeNode("p:d").isSameNode(attr)
and elem.getAttributeNodeNS(
"http://xml.python.org/ns2", "d").isSameNode(attr)
and attrmap["p:d"].isSameNode(attr)
and attrmap[("http://xml.python.org/ns2", "d")].isSameNode(attr))
# Rename back to a simple non-NS node
attr = doc.renameNode(attr, xml.dom.EMPTY_NAMESPACE, "e")
self.confirm(attr.name == "e"
and attr.nodeName == "e"
and attr.localName is None
and attr.namespaceURI == xml.dom.EMPTY_NAMESPACE
and attr.prefix is None
and attr.value == "v"
and elem.getAttributeNode("a") is None
and elem.getAttributeNode("b") is None
and elem.getAttributeNode("c") is None
and elem.getAttributeNode("p:d") is None
and elem.getAttributeNodeNS(
"http://xml.python.org/ns", "c") is None
and elem.getAttributeNode("e").isSameNode(attr)
and attrmap["e"].isSameNode(attr))
self.assertRaises(xml.dom.NamespaceErr, doc.renameNode, attr,
"http://xml.python.org/ns", "xmlns")
self.checkRenameNodeSharedConstraints(doc, attr)
doc.unlink()
def testRenameElement(self):
doc = parseString("<doc/>")
elem = doc.documentElement
# Simple renaming
elem = doc.renameNode(elem, xml.dom.EMPTY_NAMESPACE, "a")
self.confirm(elem.tagName == "a"
and elem.nodeName == "a"
and elem.localName is None
and elem.namespaceURI == xml.dom.EMPTY_NAMESPACE
and elem.prefix is None
and elem.ownerDocument.isSameNode(doc))
# Rename to have a namespace, no prefix
elem = doc.renameNode(elem, "http://xml.python.org/ns", "b")
self.confirm(elem.tagName == "b"
and elem.nodeName == "b"
and elem.localName == "b"
and elem.namespaceURI == "http://xml.python.org/ns"
and elem.prefix is None
and elem.ownerDocument.isSameNode(doc))
# Rename to have a namespace, with prefix
elem = doc.renameNode(elem, "http://xml.python.org/ns2", "p:c")
self.confirm(elem.tagName == "p:c"
and elem.nodeName == "p:c"
and elem.localName == "c"
and elem.namespaceURI == "http://xml.python.org/ns2"
and elem.prefix == "p"
and elem.ownerDocument.isSameNode(doc))
# Rename back to a simple non-NS node
elem = doc.renameNode(elem, xml.dom.EMPTY_NAMESPACE, "d")
self.confirm(elem.tagName == "d"
and elem.nodeName == "d"
and elem.localName is None
and elem.namespaceURI == xml.dom.EMPTY_NAMESPACE
and elem.prefix is None
and elem.ownerDocument.isSameNode(doc))
self.checkRenameNodeSharedConstraints(doc, elem)
doc.unlink()
def testRenameOther(self):
# We have to create a comment node explicitly since not all DOM
# builders used with minidom add comments to the DOM.
doc = xml.dom.minidom.getDOMImplementation().createDocument(
xml.dom.EMPTY_NAMESPACE, "e", None)
node = doc.createComment("comment")
self.assertRaises(xml.dom.NotSupportedErr, doc.renameNode, node,
xml.dom.EMPTY_NAMESPACE, "foo")
doc.unlink()
def testWholeText(self):
doc = parseString("<doc>a</doc>")
elem = doc.documentElement
text = elem.childNodes[0]
self.assertEqual(text.nodeType, Node.TEXT_NODE)
self.checkWholeText(text, "a")
elem.appendChild(doc.createTextNode("b"))
self.checkWholeText(text, "ab")
elem.insertBefore(doc.createCDATASection("c"), text)
self.checkWholeText(text, "cab")
# make sure we don't cross other nodes
splitter = doc.createComment("comment")
elem.appendChild(splitter)
text2 = doc.createTextNode("d")
elem.appendChild(text2)
self.checkWholeText(text, "cab")
self.checkWholeText(text2, "d")
x = doc.createElement("x")
elem.replaceChild(x, splitter)
splitter = x
self.checkWholeText(text, "cab")
self.checkWholeText(text2, "d")
x = doc.createProcessingInstruction("y", "z")
elem.replaceChild(x, splitter)
splitter = x
self.checkWholeText(text, "cab")
self.checkWholeText(text2, "d")
elem.removeChild(splitter)
self.checkWholeText(text, "cabd")
self.checkWholeText(text2, "cabd")
def testPatch1094164(self):
doc = parseString("<doc><e/></doc>")
elem = doc.documentElement
e = elem.firstChild
self.confirm(e.parentNode is elem, "Before replaceChild()")
# Check that replacing a child with itself leaves the tree unchanged
elem.replaceChild(e, e)
self.confirm(e.parentNode is elem, "After replaceChild()")
def testReplaceWholeText(self):
def setup():
doc = parseString("<doc>a<e/>d</doc>")
elem = doc.documentElement
text1 = elem.firstChild
text2 = elem.lastChild
splitter = text1.nextSibling
elem.insertBefore(doc.createTextNode("b"), splitter)
elem.insertBefore(doc.createCDATASection("c"), text1)
return doc, elem, text1, splitter, text2
doc, elem, text1, splitter, text2 = setup()
text = text1.replaceWholeText("new content")
self.checkWholeText(text, "new content")
self.checkWholeText(text2, "d")
self.confirm(len(elem.childNodes) == 3)
doc, elem, text1, splitter, text2 = setup()
text = text2.replaceWholeText("new content")
self.checkWholeText(text, "new content")
self.checkWholeText(text1, "cab")
self.confirm(len(elem.childNodes) == 5)
doc, elem, text1, splitter, text2 = setup()
text = text1.replaceWholeText("")
self.checkWholeText(text2, "d")
self.confirm(text is None
and len(elem.childNodes) == 2)
def testSchemaType(self):
doc = parseString(
"<!DOCTYPE doc [\n"
" <!ENTITY e1 SYSTEM 'http://xml.python.org/e1'>\n"
" <!ENTITY e2 SYSTEM 'http://xml.python.org/e2'>\n"
" <!ATTLIST doc id ID #IMPLIED \n"
" ref IDREF #IMPLIED \n"
" refs IDREFS #IMPLIED \n"
" enum (a|b) #IMPLIED \n"
" ent ENTITY #IMPLIED \n"
" ents ENTITIES #IMPLIED \n"
" nm NMTOKEN #IMPLIED \n"
" nms NMTOKENS #IMPLIED \n"
" text CDATA #IMPLIED \n"
" >\n"
"]><doc id='name' notid='name' text='splat!' enum='b'"
" ref='name' refs='name name' ent='e1' ents='e1 e2'"
" nm='123' nms='123 abc' />")
elem = doc.documentElement
# We don't want to rely on any specific loader at this point, so
# just make sure we can get to all the names, and that the
# DTD-based namespace is right. The names can vary by loader
# since each supports a different level of DTD information.
t = elem.schemaType
self.confirm(t.name is None
and t.namespace == xml.dom.EMPTY_NAMESPACE)
names = "id notid text enum ref refs ent ents nm nms".split()
for name in names:
a = elem.getAttributeNode(name)
t = a.schemaType
self.confirm(hasattr(t, "name")
and t.namespace == xml.dom.EMPTY_NAMESPACE)
def testSetIdAttribute(self):
doc = parseString("<doc a1='v' a2='w'/>")
e = doc.documentElement
a1 = e.getAttributeNode("a1")
a2 = e.getAttributeNode("a2")
self.confirm(doc.getElementById("v") is None
and not a1.isId
and not a2.isId)
e.setIdAttribute("a1")
self.confirm(e.isSameNode(doc.getElementById("v"))
and a1.isId
and not a2.isId)
e.setIdAttribute("a2")
self.confirm(e.isSameNode(doc.getElementById("v"))
and e.isSameNode(doc.getElementById("w"))
and a1.isId
and a2.isId)
# replace the a1 node; the new node should *not* be an ID
a3 = doc.createAttribute("a1")
a3.value = "v"
e.setAttributeNode(a3)
self.confirm(doc.getElementById("v") is None
and e.isSameNode(doc.getElementById("w"))
and not a1.isId
and a2.isId
and not a3.isId)
# renaming an attribute should not affect its ID-ness:
doc.renameNode(a2, xml.dom.EMPTY_NAMESPACE, "an")
self.confirm(e.isSameNode(doc.getElementById("w"))
and a2.isId)
def testSetIdAttributeNS(self):
NS1 = "http://xml.python.org/ns1"
NS2 = "http://xml.python.org/ns2"
doc = parseString("<doc"
" xmlns:ns1='" + NS1 + "'"
" xmlns:ns2='" + NS2 + "'"
" ns1:a1='v' ns2:a2='w'/>")
e = doc.documentElement
a1 = e.getAttributeNodeNS(NS1, "a1")
a2 = e.getAttributeNodeNS(NS2, "a2")
self.confirm(doc.getElementById("v") is None
and not a1.isId
and not a2.isId)
e.setIdAttributeNS(NS1, "a1")
self.confirm(e.isSameNode(doc.getElementById("v"))
and a1.isId
and not a2.isId)
e.setIdAttributeNS(NS2, "a2")
self.confirm(e.isSameNode(doc.getElementById("v"))
and e.isSameNode(doc.getElementById("w"))
and a1.isId
and a2.isId)
# replace the a1 node; the new node should *not* be an ID
a3 = doc.createAttributeNS(NS1, "a1")
a3.value = "v"
e.setAttributeNode(a3)
self.confirm(e.isSameNode(doc.getElementById("w")))
self.confirm(not a1.isId)
self.confirm(a2.isId)
self.confirm(not a3.isId)
self.confirm(doc.getElementById("v") is None)
# renaming an attribute should not affect its ID-ness:
doc.renameNode(a2, xml.dom.EMPTY_NAMESPACE, "an")
self.confirm(e.isSameNode(doc.getElementById("w"))
and a2.isId)
def testSetIdAttributeNode(self):
NS1 = "http://xml.python.org/ns1"
NS2 = "http://xml.python.org/ns2"
doc = parseString("<doc"
" xmlns:ns1='" + NS1 + "'"
" xmlns:ns2='" + NS2 + "'"
" ns1:a1='v' ns2:a2='w'/>")
e = doc.documentElement
a1 = e.getAttributeNodeNS(NS1, "a1")
a2 = e.getAttributeNodeNS(NS2, "a2")
self.confirm(doc.getElementById("v") is None
and not a1.isId
and not a2.isId)
e.setIdAttributeNode(a1)
self.confirm(e.isSameNode(doc.getElementById("v"))
and a1.isId
and not a2.isId)
e.setIdAttributeNode(a2)
self.confirm(e.isSameNode(doc.getElementById("v"))
and e.isSameNode(doc.getElementById("w"))
and a1.isId
and a2.isId)
# replace the a1 node; the new node should *not* be an ID
a3 = doc.createAttributeNS(NS1, "a1")
a3.value = "v"
e.setAttributeNode(a3)
self.confirm(e.isSameNode(doc.getElementById("w")))
self.confirm(not a1.isId)
self.confirm(a2.isId)
self.confirm(not a3.isId)
self.confirm(doc.getElementById("v") is None)
# renaming an attribute should not affect its ID-ness:
doc.renameNode(a2, xml.dom.EMPTY_NAMESPACE, "an")
self.confirm(e.isSameNode(doc.getElementById("w"))
and a2.isId)
def testPickledDocument(self):
doc = parseString("<?xml version='1.0' encoding='us-ascii'?>\n"
"<!DOCTYPE doc PUBLIC 'http://xml.python.org/public'"
" 'http://xml.python.org/system' [\n"
" <!ELEMENT e EMPTY>\n"
" <!ENTITY ent SYSTEM 'http://xml.python.org/entity'>\n"
"]><doc attr='value'> text\n"
"<?pi sample?> <!-- comment --> <e/> </doc>")
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
s = pickle.dumps(doc, proto)
doc2 = pickle.loads(s)
stack = [(doc, doc2)]
while stack:
n1, n2 = stack.pop()
self.confirm(n1.nodeType == n2.nodeType
and len(n1.childNodes) == len(n2.childNodes)
and n1.nodeName == n2.nodeName
and not n1.isSameNode(n2)
and not n2.isSameNode(n1))
if n1.nodeType == Node.DOCUMENT_TYPE_NODE:
len(n1.entities)
len(n2.entities)
len(n1.notations)
len(n2.notations)
self.confirm(len(n1.entities) == len(n2.entities)
and len(n1.notations) == len(n2.notations))
for i in range(len(n1.notations)):
# XXX this loop body doesn't seem to be executed?
no1 = n1.notations.item(i)
                        no2 = n2.notations.item(i)
self.confirm(no1.name == no2.name
and no1.publicId == no2.publicId
and no1.systemId == no2.systemId)
stack.append((no1, no2))
for i in range(len(n1.entities)):
e1 = n1.entities.item(i)
e2 = n2.entities.item(i)
self.confirm(e1.notationName == e2.notationName
and e1.publicId == e2.publicId
and e1.systemId == e2.systemId)
stack.append((e1, e2))
if n1.nodeType != Node.DOCUMENT_NODE:
self.confirm(n1.ownerDocument.isSameNode(doc)
and n2.ownerDocument.isSameNode(doc2))
for i in range(len(n1.childNodes)):
stack.append((n1.childNodes[i], n2.childNodes[i]))
def testSerializeCommentNodeWithDoubleHyphen(self):
doc = create_doc_without_doctype()
doc.appendChild(doc.createComment("foo--bar"))
self.assertRaises(ValueError, doc.toxml)
def testEmptyXMLNSValue(self):
doc = parseString("<element xmlns=''>\n"
"<foo/>\n</element>")
doc2 = parseString(doc.toxml())
self.confirm(doc2.namespaceURI == xml.dom.EMPTY_NAMESPACE)
def test_main():
run_unittest(MinidomTest)
if __name__ == "__main__":
test_main()
| {
"content_hash": "9cf593c2feb5417c76b6bd39fc92dfcf",
"timestamp": "",
"source": "github",
"line_count": 1441,
"max_line_length": 81,
"avg_line_length": 42.501040943789036,
"alnum_prop": 0.5623081444712952,
"repo_name": "mcking49/apache-flask",
"id": "a962ddc1d09aae11b7d3108c92fca88bf3cb0dec",
"size": "61272",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "Python/Lib/test/test_minidom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2501"
},
{
"name": "C",
"bytes": "479174"
},
{
"name": "C++",
"bytes": "21416"
},
{
"name": "CSS",
"bytes": "170391"
},
{
"name": "Groff",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "1003190"
},
{
"name": "JavaScript",
"bytes": "1559701"
},
{
"name": "PHP",
"bytes": "3338"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "30714489"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
import math
MIN_ELLIPSE_VERTICES = 50
MAX_ELLIPSE_VERTICES = 5000
FULL_CIRCLE = (0, 2*math.pi)
def constrain(n:float, minimum:float=None, maximum:float=None)->float:
if minimum is not None and n < minimum: return minimum
if maximum is not None and n > maximum: return maximum
return n
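# Illustrative examples (editorial, not part of the original module):
#   constrain(7, 0, 5)  -> 5
#   constrain(-1, 0, 5) -> 0
#   constrain(3)        -> 3   (no bounds supplied, so the value passes through)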
def flatten(iterable):
"""
Generator to flatten an iterable by one level
"""
for i in iterable:
yield from i
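# Illustrative example (editorial): flattening goes one level deep only, e.g.
#   list(flatten([[1, 2], [3, [4]]])) -> [1, 2, 3, [4]]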
def ellipse_coord(xy:tuple, xyr:tuple, theta:float)->(float,float):
"""
    Return the Cartesian coordinates of a point on an ellipse, given its angle around the ellipse
"""
x, y = xy
xr, yr = xyr
return x + xr*math.cos(theta), y + yr*math.sin(theta)
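# Illustrative example (editorial): with centre (0, 0) and radii (2, 1),
#   ellipse_coord((0, 0), (2, 1), 0)           -> (2.0, 0.0)
#   ellipse_coord((0, 0), (2, 1), math.pi / 2) -> (~0.0, 1.0)   (up to float error)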
def num_ellipse_segments(xyr:tuple, stroke_width:float)->int:
"""
    Return a good number of segments to use for an ellipse with the given parameters
"""
xr, yr = xyr
if xr == yr: # circle
circumference = 2 * math.pi * xr
else: # ellipse (approximation)
circumference = math.pi * (3*(xr + yr) - math.sqrt((3*xr + yr) * (xr + 3*yr)))
return int(constrain(circumference/stroke_width, MIN_ELLIPSE_VERTICES, MAX_ELLIPSE_VERTICES))
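# Editorial note: the non-circular branch above matches Ramanujan's first
# approximation for an ellipse's circumference,
#   C ~= pi * (3*(xr + yr) - sqrt((3*xr + yr) * (xr + 3*yr))).
# Illustrative example (a stroke_width of 1.0 is hypothetical):
#   num_ellipse_segments((10, 10), 1.0) uses the exact circle circumference
#   2*pi*10 ~= 62.8, giving 62 after clamping to
#   [MIN_ELLIPSE_VERTICES, MAX_ELLIPSE_VERTICES].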
def ellipse_points(xy:tuple, xyr:tuple, stroke_width:float, start_stop:tuple=FULL_CIRCLE, extra_points:tuple=()):
"""
A generator that returns the points that make up an ellipse with the given parameters
As a list, the result would look like this [x1, y1, x2, y2, ...]
    It can also generate more complicated ellipse-like shapes, such as arcs, via the start_stop and extra_points parameters
"""
start, stop = start_stop
angle_change = stop - start
# we use ceil because we want a point even if angle_change is very small
segments = math.ceil(num_ellipse_segments(xyr, stroke_width) * angle_change / (2*math.pi))
yield from extra_points
for s in range(segments):
theta = 2 * math.pi * s / segments
# this will only change theta if start_stop isn't (0, 2pi)
theta = start + theta*angle_change / (2*math.pi)
        yield from ellipse_coord(xy, xyr, theta)
| {
"content_hash": "619e49be4db717a1081e1d3c78c32b47",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 120,
"avg_line_length": 38.666666666666664,
"alnum_prop": 0.6657088122605364,
"repo_name": "Potato42/primitivepyg",
"id": "e0d0170b4fa53a3bc3030bc953d2bd9e723862df",
"size": "2088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "primitivepyg/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36556"
}
],
"symlink_target": ""
} |
import mock
from rally.plugins.openstack.scenarios.sahara import (node_group_templates
as ngts)
from tests.unit import test
SAHARA_NGTS = ("rally.plugins.openstack.scenarios.sahara.node_group_templates"
".SaharaNodeGroupTemplates")
class SaharaNodeGroupTemplatesTestCase(test.TestCase):
def setUp(self):
super(SaharaNodeGroupTemplatesTestCase, self).setUp()
self.context = test.get_test_context()
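    # Editorial note (not part of the original test): stacked mock.patch
    # decorators are applied bottom-up, so the patch closest to each test method
    # (the worker-template one) is passed as the first mock argument; the test
    # method parameters below mirror the decorator order in reverse.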
@mock.patch(SAHARA_NGTS + "._list_node_group_templates")
@mock.patch(SAHARA_NGTS + "._create_master_node_group_template",
return_value=object())
@mock.patch(SAHARA_NGTS + "._create_worker_node_group_template",
return_value=object)
def test_create_and_list_node_group_templates(
self,
mock__create_worker_node_group_template,
mock__create_master_node_group_template,
mock__list_node_group_templates):
ngts_scenario = ngts.SaharaNodeGroupTemplates(self.context)
ngts_scenario.create_and_list_node_group_templates("test_flavor",
"test_plugin",
"test_version")
mock__create_master_node_group_template.assert_called_once_with(
flavor_id="test_flavor",
plugin_name="test_plugin",
hadoop_version="test_version",
use_autoconfig=True)
mock__create_worker_node_group_template.assert_called_once_with(
flavor_id="test_flavor",
plugin_name="test_plugin",
hadoop_version="test_version",
use_autoconfig=True)
mock__list_node_group_templates.assert_called_once_with()
@mock.patch(SAHARA_NGTS + "._delete_node_group_template")
@mock.patch(SAHARA_NGTS + "._create_master_node_group_template",
return_value=mock.MagicMock(id=1))
@mock.patch(SAHARA_NGTS + "._create_worker_node_group_template",
return_value=mock.MagicMock(id=2))
def test_create_delete_node_group_templates(
self,
mock__create_worker_node_group_template,
mock__create_master_node_group_template,
mock__delete_node_group_template):
ngts_scenario = ngts.SaharaNodeGroupTemplates(self.context)
ngts_scenario.create_delete_node_group_templates(
"test_flavor",
"test_plugin",
"test_version")
mock__create_master_node_group_template.assert_called_once_with(
flavor_id="test_flavor",
plugin_name="test_plugin",
hadoop_version="test_version",
use_autoconfig=True)
mock__create_worker_node_group_template.assert_called_once_with(
flavor_id="test_flavor",
plugin_name="test_plugin",
hadoop_version="test_version",
use_autoconfig=True)
mock__delete_node_group_template.assert_has_calls(calls=[
mock.call(mock__create_master_node_group_template.return_value),
mock.call(mock__create_worker_node_group_template.return_value)])
| {
"content_hash": "26adc6a89eb9bf113220a29be2548852",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 78,
"avg_line_length": 42.693333333333335,
"alnum_prop": 0.6030605871330419,
"repo_name": "gluke77/rally",
"id": "abe7bcae8ddfed43932c152268521c3d1a1a1c33",
"size": "3832",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/unit/plugins/openstack/scenarios/sahara/test_node_group_templates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "51524"
},
{
"name": "JavaScript",
"bytes": "7665"
},
{
"name": "Mako",
"bytes": "17949"
},
{
"name": "Python",
"bytes": "3480454"
},
{
"name": "Shell",
"bytes": "43835"
}
],
"symlink_target": ""
} |
"""Test the importmulti RPC."""
from test_framework.test_framework import PivxTestFramework
from test_framework.util import *
class ImportMultiTest (PivxTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-addresstype=legacy"], ["-addresstype=legacy"]]
self.setup_clean_chain = True
def setup_network(self):
self.setup_nodes()
def run_test (self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
node0_address1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
#Check only one address
assert_equal(node0_address1['ismine'], True)
#Node 1 sync test
assert_equal(self.nodes[1].getblockcount(),1)
#Address Test - before import
address_info = self.nodes[1].validateaddress(node0_address1['address'])
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# RPC importmulti -----------------------------------------------
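        # Editorial sketch of the request shape exercised throughout this test
        # (illustrative only; field names follow the calls below): each entry is
        # a dict such as
        #   {"scriptPubKey": {"address": addr} or "<hex scriptPubKey>",
        #    "timestamp": "now",
        #    "pubkeys": [...], "keys": [...], "internal": True, "watchonly": True}
        # passed as a list to node.importmulti([...]).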
# Bitcoin Address
self.log.info("Should import an address")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
watchonly_address = address['address']
watchonly_timestamp = timestamp
self.log.info("Should not import an invalid address")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": "not valid address",
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Invalid address')
# ScriptPubKey + internal
self.log.info("Should import a scriptPubKey with internal flag")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"internal": True
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + !internal
self.log.info("Should not import a scriptPubKey without internal flag")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Public key + !Internal
self.log.info("Should import an address with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"pubkeys": [ address['pubkey'] ]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Public key + internal
self.log.info("Should import a scriptPubKey with internal and with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address['pubkey'] ],
"internal": True
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Public key + !internal
self.log.info("Should not import a scriptPubKey without internal and with public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address['pubkey'] ]
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Private key + !watchonly
self.log.info("Should import an address with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], True)
assert_equal(address_assert['timestamp'], timestamp)
self.log.info("Should not import an address with private key if is already imported")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -4)
assert_equal(result[0]['error']['message'], 'The wallet already contains the private key for this address or script')
# Address + Private key + watchonly
self.log.info("Should not import an address with private key and with watchonly")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ],
"watchonly": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Private key + internal
self.log.info("Should import a scriptPubKey with internal and with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ],
"internal": True
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], True)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Private key + !internal
self.log.info("Should not import a scriptPubKey without internal and with private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# P2SH address
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['isscript'], True)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], False)
# P2SH + Redeem script
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh with respective redeem script")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript']
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + !Watchonly
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh with respective redeem script and private keys")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript'],
"keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(multi_sig_script['address'])
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + Watchonly
sig_address_1 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
transactionid = self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh with respective redeem script and private keys")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript'],
"keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])],
"watchonly": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
# Address + Public key + !Internal + Wrong pubkey
self.log.info("Should not import an address with a wrong public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"pubkeys": [ address2['pubkey'] ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Public key + internal + Wrong pubkey
self.log.info("Should not import a scriptPubKey with internal and with a wrong public key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address2['pubkey'] ],
"internal": True
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Private key + !watchonly + Wrong private key
self.log.info("Should not import an address with a wrong private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address2['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Private key + internal + Wrong private key
self.log.info("Should not import a scriptPubKey with internal and with a wrong private key")
address = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
address2 = self.nodes[0].validateaddress(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address2['address']) ],
"internal": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].validateaddress(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Importing existing watch only address with new timestamp should replace saved timestamp.
assert_greater_than(timestamp, watchonly_timestamp)
self.log.info("Should replace previously saved watch only timestamp.")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": watchonly_address,
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].validateaddress(watchonly_address)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
watchonly_timestamp = timestamp
# restart nodes to check for proper serialization/deserialization of watch only address
self.stop_nodes()
self.start_nodes()
address_assert = self.nodes[1].validateaddress(watchonly_address)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], watchonly_timestamp)
# Bad or missing timestamps
self.log.info("Should throw on invalid or missing timestamp values")
assert_raises_rpc_error(-3, 'Missing required timestamp field for key',
self.nodes[1].importmulti, [{
"scriptPubKey": address['scriptPubKey'],
}])
assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string',
self.nodes[1].importmulti, [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "",
}])
if __name__ == '__main__':
    ImportMultiTest().main()
| {
"content_hash": "a8f6c79ea7653e833e21486c8d904220",
"timestamp": "",
"source": "github",
"line_count": 447,
"max_line_length": 135,
"avg_line_length": 48.18568232662192,
"alnum_prop": 0.6133525233297739,
"repo_name": "martexcoin/martexcoin",
"id": "730841348ff26bf14b6750a9e5582d7eeeab80f0",
"size": "21753",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/functional/wallet_importmulti.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "6549796"
},
{
"name": "C++",
"bytes": "5425220"
},
{
"name": "CMake",
"bytes": "12720"
},
{
"name": "CSS",
"bytes": "184584"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30291"
},
{
"name": "M4",
"bytes": "205547"
},
{
"name": "Makefile",
"bytes": "121633"
},
{
"name": "Objective-C++",
"bytes": "6690"
},
{
"name": "Python",
"bytes": "1023906"
},
{
"name": "QMake",
"bytes": "26119"
},
{
"name": "Sage",
"bytes": "30188"
},
{
"name": "Shell",
"bytes": "35318"
}
],
"symlink_target": ""
} |
from zeep import xsd
from .mappings import query_type_mapping
def query_filter(vm, field, value, query_type):
query_function = query_type_mapping[query_type]
if field['type'] is 'String':
query_filter = vm.query_factory[query_function](Field=field['name'], Value=xsd.AnyObject(xsd.String(), value))
elif field['type'] is 'Id':
query_filter = vm.query_factory[query_function](Field=field['name'], Value=vm.type_factory.Id(value))
elif field['type'] is 'Long':
query_filter = vm.query_factory[query_function](Field=field['name'], Value=xsd.AnyObject(xsd.Long(), value))
elif field['type'] is 'Boolean':
query_filter = vm.query_factory[query_function](Field=field['name'], Value=xsd.AnyObject(xsd.Boolean(), value))
elif field['type'] is 'OsVersion':
query_filter = vm.query_factory[query_function](Field=field['name'], Value=xsd.AnyObject(vm.query_factory.OsVersion, value))
elif field['type'] is 'ClientState':
query_filter = vm.query_factory[query_function](Field=field['name'], Value=xsd.AnyObject(vm.query_factory.ClientState, value))
else:
raise Exception("Can't determine Value type")
return query_filter
def multi_query(vm, filters, join_type):
query_array = vm.query_factory.ArrayOfQueryFilter(QueryFilter=filters)
if join_type is 'OR':
multi_filter = vm.query_factory.QueryFilterOr(Filters=query_array)
elif join_type is 'AND':
multi_filter = vm.query_factory.QueryFilterAnd(Filters=query_array)
else:
raise Exception('join_type must be either OR or AND')
return multi_filter
def query(vm, field, value, page=1, query_type='BEGINS_WITH'):
if isinstance(value, list):
filters = [query_filter(vm, field=field, value=item, query_type=query_type) for item in value]
q_filter = multi_query(vm, filters, 'OR')
else:
q_filter = query_filter(vm, field=field, value=value, query_type=query_type)
return vm.query_factory.QueryDefinition(Filter=q_filter, Page=page)
def _collect_query_results(vm, field, value, query_type, query_function, **kwargs):
results = []
current_page = 1
while True:
query_definition = query(vm, field=field, value=value, page=current_page, query_type=query_type)
result = query_function(queryDefinition=query_definition, **kwargs)
# Drop out if there are no results
if result['Elements'] is None:
break
results += result['Elements']['anyType']
# Stop if on the last page
if not result['NextPageAvailable']:
break
current_page += 1
return results
| {
"content_hash": "a9a54e53b92ab30f4699c46c256029f9",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 134,
"avg_line_length": 46.50877192982456,
"alnum_prop": 0.6725763862693324,
"repo_name": "jay-tuckey/python-vmwaremirage",
"id": "fa3a1baeeb6bb6bec63010a8302e6276e3ca6601",
"size": "2651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vmwaremirage/queries.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22141"
}
],
"symlink_target": ""
} |
import theano
import theano.tensor as T
import numpy as np
from recnn_train import RNTN as TheanoRNTN
from recnn import RNTN as NumpyRNTN, RNTNLayer
from numpy_impl import LogisticRegression
from test_util import assert_matrix_eq
vocab_size = 6
embed_dim = 3
label_n = 5
word2id = {
'I': 0,
'love': 1,
'you':2,
'<UNK>': 5,
}
x = T.imatrix('x')
y = T.ivector('y')
th_model = TheanoRNTN(x, y, vocab_size, embed_dim, label_n)
np_model = NumpyRNTN.load_from_theano_model(th_model, word2id)
# Equivalent direct construction:
# NumpyRNTN(embedding=th_model.embedding.get_value(),
#           rntn_layer=RNTNLayer(th_model.rntn_layer.V.get_value(), th_model.rntn_layer.W.get_value()),
#           logreg_layer=LogisticRegression(th_model.logreg_layer.W.get_value(), th_model.logreg_layer.b.get_value()),
#           word2id=word2id)
x_input = np.asarray([[4, 2, 5],
[3, 1, 4]],
dtype=np.int32)
tree_input = (5, "love", (3, (3, "you"), (3, "bro")))
actual = np_model.get_node_vector(tree_input)
th_model.update_embedding(x_input)
expected = th_model.embedding.get_value()[3]
assert_matrix_eq(actual, expected, "node vector")
get_label = theano.function(inputs = [x],
outputs = th_model.logreg_layer.pred_y)
score = np_model.predict_top_node(tree_input)
assert isinstance(score, np.int64)
assert_matrix_eq(score, get_label(x_input[1:2,:]), 'logreg.predict')
| {
"content_hash": "758e032233d2e592f663bb24d274776f",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 132,
"avg_line_length": 27.96153846153846,
"alnum_prop": 0.625171939477304,
"repo_name": "xiaohan2012/twitter-sent-dnn",
"id": "bcd66a3cc72d659f653543ff48ee39cac052eb42",
"size": "1454",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test_recnn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "149219"
},
{
"name": "Shell",
"bytes": "1084"
}
],
"symlink_target": ""
} |
"""Tota game runner.
Usage:
./play.py --help
./play.py RADIANT_HEROES DIRE_HEROES [-m MAP] [-s SIZE] [-d] [-b] [-f MAX_FRAMES] [-c] [-r REPLAY_DIR] [-q] [-p]
DIRE_HEROES and RADIANT_HEROES must be comma separated lists
Options:
-h --help Show this help.
-m MAP The path to the map file to use (there is a default
map)
-s SIZE The size of the world. Format: COLUMNSxROWS
-d Debug mode (lots of extra info, and step by
step game play)
-f MAX_FRAMES Maximum frames per second [default: 2].
-b Use basic icons if you have trouble with the
normal icons.
-c Use a compressed view if the default one is too wide
for your terminal.
    -r REPLAY_DIR Save a json replay, which consists of *lots* of files
(1 per tick) inside the specified dir.
-q Don't draw the map in the terminal.
    -p            Protect against exceptions, keep playing if a hero
                   fails during an act or action.
"""
import os
from docopt import docopt
from tota.game import Game
from tota.drawers.terminal import TerminalDrawer
from tota.drawers.json_replay import JsonReplayDrawer
DEFAULT_MAP_SIZE = (87, 33)
DEFAULT_MAP_PATH = './map.txt'
def play():
"""Initiate a game, using the command line arguments as configuration."""
arguments = docopt(__doc__)
# start a game
# parse arguments
debug = arguments['-d']
protected = arguments['-p']
use_basic_icons = arguments['-b']
use_compressed_view = arguments['-c']
max_frames = int(arguments['-f'])
radiant_heroes = arguments['RADIANT_HEROES'].split(',')
dire_heroes = arguments['DIRE_HEROES'].split(',')
drawers = []
if not arguments['-q']:
drawers.append(TerminalDrawer(use_basic_icons=use_basic_icons,
use_compressed_view=use_compressed_view))
if arguments['-r']:
replay_dir = arguments['-r']
drawers.append(JsonReplayDrawer(replay_dir=replay_dir))
size = arguments['-s']
if size:
size = tuple(map(int, size.split('x')))
else:
size = DEFAULT_MAP_SIZE
map_path = arguments['-m'] or DEFAULT_MAP_PATH
# create and start game
g = Game(radiant_heroes=radiant_heroes,
dire_heroes=dire_heroes,
map_file_path=map_path,
world_size=size,
debug=debug,
protected=protected,
drawers=drawers)
os.system('clear')
g.play(max_frames)
if __name__ == '__main__':
play()
| {
"content_hash": "37cbb0d210b5ca6e73281e908b04b8e0",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 116,
"avg_line_length": 33.19277108433735,
"alnum_prop": 0.5651542649727768,
"repo_name": "dmoisset/tota",
"id": "c3e467d55c068200d99776741b9c853606a91de0",
"size": "2777",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tota/play.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "51804"
}
],
"symlink_target": ""
} |
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
class GenericFilterFormHelper(FormHelper):
def __init__(self, *args, **kwargs):
super(GenericFilterFormHelper, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.form_class = 'genericFilterForm'
self.form_method = 'GET'
self.add_input(Submit('Filter', 'Search'))
| {
"content_hash": "672af03e89d693b1ba39b70874b25800",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 70,
"avg_line_length": 36.72727272727273,
"alnum_prop": 0.6732673267326733,
"repo_name": "acdh-oeaw/vhioe",
"id": "e4bab9d3f9dfea56fd02bd64991e366eea9e4ae5",
"size": "404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "browsing/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25850"
},
{
"name": "HTML",
"bytes": "105907"
},
{
"name": "JavaScript",
"bytes": "220270"
},
{
"name": "Python",
"bytes": "91715"
}
],
"symlink_target": ""
} |
import os
import IECore
import GafferScene
import GafferSceneTest
class VDBTestCase( GafferSceneTest.SceneTestCase ) :
def setUp( self ) :
GafferSceneTest.SceneTestCase.setUp( self )
self.dataDir = os.path.join( os.path.dirname( __file__ ), "data")
self.filters = [] # need to keep hold of the filters for the duration of the test
def setFilter(self, node, path = '/vdb'):
pathFilter = GafferScene.PathFilter( "PathFilter" )
pathFilter["paths"].setValue( IECore.StringVectorData( [ path ] ) )
self.filters.append( pathFilter )
node["filter"].setInput( pathFilter["out"] )
| {
"content_hash": "a8781120d7b19f7c0dce7a5423bef743",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 83,
"avg_line_length": 28.38095238095238,
"alnum_prop": 0.7181208053691275,
"repo_name": "johnhaddon/gaffer",
"id": "064d80095040792c2412132bff4caf1b7be77b38",
"size": "2403",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "python/GafferVDBTest/VDBTestCase.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5790"
},
{
"name": "C",
"bytes": "61993"
},
{
"name": "C++",
"bytes": "9571062"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6208"
},
{
"name": "Python",
"bytes": "10271481"
},
{
"name": "Ruby",
"bytes": "419"
},
{
"name": "Shell",
"bytes": "14389"
}
],
"symlink_target": ""
} |
__author__ = 'max'
import gpxpandas.gpxreader as gpx_reader
import os
def read_gpx_file(gpx_file_name):
with open(gpx_file_name, 'r') as gpx_file:
gpx = gpx_reader.parse_gpx(gpx_file)
# Ensure a name after parsing.
if not gpx.name:
file_name = os.path.basename(gpx_file_name)
f_name_without_ext = os.path.splitext(file_name)
gpx.name = f_name_without_ext
return read_gpx(gpx)
def read_gpx(gpx):
return gpx_reader.pandas_data_frame_for_gpx(gpx)
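# Illustrative usage (not part of the original module; the file path below is
# hypothetical):
#   frame = read_gpx_file('tracks/morning_run.gpx')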
def write_gpx(pd_data_frame):
# TODO write gpx file from pandas
    pass
| {
"content_hash": "3ef8548ac80ff3d9260f1e7e5ba7ff3c",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 60,
"avg_line_length": 25.125,
"alnum_prop": 0.6301824212271974,
"repo_name": "komax/gpx-pandas",
"id": "b5ae96c182badc9f6b0a408ab753bf93e0efbb7f",
"size": "603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gpxpandas/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1639"
}
],
"symlink_target": ""
} |
"""Helper class for patching and unpatching."""
import math
from typing import NamedTuple, Optional, Tuple
import tensorflow as tf
from vct.src import extract_patches
class Patched(NamedTuple):
"""Represents a patched tensor.
Attributes:
tensor: The patched tensor, shape (b', patch_size ** 2, d)
num_patches: Tuple (n_h, n_w) indicating how many patches are in b'.
"""
tensor: tf.Tensor
num_patches: Tuple[int, int]
class Patcher:
"""Helper class for patching and unpatching."""
def __init__(
self,
stride,
pad_mode = "REFLECT",
):
"""Initializes the patch helper."""
self.stride = stride
self.pad_mode = pad_mode
def _pad(self, x, patch_size):
"""Pads `x` such that we can do VALID patch extraction."""
if patch_size < self.stride:
      raise ValueError("`patch_size` must be greater than or equal to `stride`!")
# Additionally pad to handle patch_size > stride.
missing = patch_size - self.stride
if missing % 2 != 0:
raise ValueError("Can only handle even missing pixels.")
_, height, width, _ = x.shape
(n_h, n_w), (height_padded, width_padded) = self.get_num_patches(
height, width)
return tf.pad(x, [
[0, 0],
[missing // 2, height_padded - height + missing // 2],
[missing // 2, width_padded - width + missing // 2],
[0, 0],
], self.pad_mode), n_h, n_w
def get_num_patches(
self, height, width):
# Initial pad to get all strides in.
height_padded = math.ceil(height / self.stride) * self.stride
width_padded = math.ceil(width / self.stride) * self.stride
# Calculate number of patches in the height and width dimensions.
n_h = height_padded // self.stride
n_w = width_padded // self.stride
return (n_h, n_w), (height_padded, width_padded)
def __call__(self, t, patch_size):
"""Pads and extracts patches, shape (b * num_patches, size ** 2, d)."""
# First pad such that we can use `extract_patches` with padding=VALID, i.e.,
# first patch should cover top left part.
t_padded, n_h, n_w = self._pad(t, patch_size)
patches = extract_patches.extract_patches(t_padded, patch_size, self.stride)
# `extract_patches` returns (b, n_h, n_w, seq_len * d), we reshape this
# to (..., seq_len, d).
b, n_hp, n_wp, _ = patches.shape
d = t_padded.shape[-1]
assert (n_hp, n_wp) == (n_h, n_w) # Programmer error.
patches = tf.reshape(patches, (b * n_h * n_w, patch_size ** 2, d))
return Patched(patches, (n_h, n_w))
def unpatch(self, t, n_h, n_w,
crop):
"""Goes back to (b, h, w, d)."""
_, seq_len, d = t.shape
assert seq_len == self.stride ** 2
t = tf.reshape(t, (-1, n_h, n_w, self.stride, self.stride, d))
t = tf.einsum("bijhwc->bihjwc", t)
t = tf.reshape(t, (-1, n_h * self.stride, n_w * self.stride, d))
if crop:
h, w = crop
return t[:, :h, :w, :]
else:
return t
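# Minimal round-trip sketch (illustrative, not part of the original module):
# patch a batch of feature maps and stitch it back together. Shapes follow the
# docstrings above; the concrete sizes are arbitrary.
def _example_roundtrip():
  patcher = Patcher(stride=8)
  x = tf.zeros((2, 30, 45, 16))  # (b, h, w, d); h and w need not be multiples of 8.
  patched = patcher(x, patch_size=8)  # tensor shape: (2 * n_h * n_w, 64, 16)
  n_h, n_w = patched.num_patches
  # unpatch() returns the padded grid, so crop back to the original spatial size.
  return patcher.unpatch(patched.tensor, n_h, n_w, crop=(30, 45))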
| {
"content_hash": "8702f35943f68ab55f9d8d772c65bde8",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 80,
"avg_line_length": 32.43956043956044,
"alnum_prop": 0.600609756097561,
"repo_name": "google-research/google-research",
"id": "b1ade06430dd63e026e371057617ef96ee617165",
"size": "3560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vct/src/patcher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
} |
'''
Created on March 27, 2017
@author: wanlipeng
'''
from problems.listnode import ListNode
class Solution(object):
def reverseList(self, head):
if head is None:
return None
newHead = ListNode(0)
newHead.next = head
curNode = head.next
head.next = None
while curNode is not None:
temp = curNode.next
curNode.next = newHead.next
newHead.next = curNode
curNode = temp
return newHead.next
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
"""
if head is None or head.next is None:
return True
slow = head
fast = head
while fast and fast.next:
slow = slow.next
fast = fast.next.next
        if fast:  # fast still has a node ahead, so the list has an odd number of nodes
slow.next = self.reverseList(slow.next)
slow = slow.next
        else:  # even number of nodes
slow = self.reverseList(slow)
while slow:
if head.val != slow.val:
return False
slow = slow.next
head = head.next
return True
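# Quick illustrative check (not part of the original solution); it relies only
# on the ListNode(value) constructor already used above.
def build_list(values):
    dummy = ListNode(0)
    tail = dummy
    for value in values:
        node = ListNode(value)
        node.next = None  # be explicit in case ListNode does not default this
        tail.next = node
        tail = node
    return dummy.next
if __name__ == '__main__':
    solution = Solution()
    print(solution.isPalindrome(build_list([1, 2, 2, 1])))  # expected: True
    print(solution.isPalindrome(build_list([1, 2, 3])))     # expected: False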
| {
"content_hash": "655bf465f4ec73d4c5d723eb5f26b250",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 51,
"avg_line_length": 24.895833333333332,
"alnum_prop": 0.5112970711297071,
"repo_name": "lunabox/leetcode",
"id": "9601d044b5a42bf9ee0cdd0e3d9b98c197eccb45",
"size": "1252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/problems/s234_Palindrome_Linked_List.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "4977"
},
{
"name": "JavaScript",
"bytes": "19188"
},
{
"name": "Kotlin",
"bytes": "50094"
},
{
"name": "Python",
"bytes": "38767"
}
],
"symlink_target": ""
} |
"""
Django settings for todo project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'gf@v$z*d5&medo$5cm@-v#!#cpa03d1rtp-0^3rea7!lhosu*l'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'todo_react',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'todo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'todo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", "static"))
STATIC_URL = '/static/'
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
| {
"content_hash": "bfac180f04e7349f576272044f7e3555",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 75,
"avg_line_length": 26.88135593220339,
"alnum_prop": 0.6935687263556116,
"repo_name": "samfcmc/todo_django_react",
"id": "b07654afe8abe04c7864746b4df35243be427d37",
"size": "3172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "todo/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "89"
},
{
"name": "JavaScript",
"bytes": "1474"
},
{
"name": "Python",
"bytes": "6509"
}
],
"symlink_target": ""
} |
"""Generate black and white test TFRecords with Example protos.
Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
  image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import numpy as np
import tensorflow as tf
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
bbox: list of bounding boxes; each box is a list of integers
specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
the same label as the image label.
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
xmin = []
ymin = []
xmax = []
ymax = []
for b in bbox:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([xmin, ymin, xmax, ymax], b)]
# pylint: enable=expression-not-assigned
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/synset': _bytes_feature(synset),
'image/class/text': _bytes_feature(human),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature([label] * len(xmin)),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._image = tf.placeholder(dtype=tf.uint8)
self._encode_jpeg = tf.image.encode_jpeg(
self._image, format='rgb', quality=100)
def encode_jpeg(self, image):
jpeg_image = self._sess.run(self._encode_jpeg,
feed_dict={self._image: image})
return jpeg_image
def _process_image(coder, name):
"""Process a single image file.
If name is "train", a black image is returned. Otherwise, a white image is
returned.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
name: string, unique identifier specifying the data set.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
  # Generate a solid-color image (black for train, white otherwise).
value = 0 if name == 'train' else 255
height = random.randint(30, 299)
width = random.randint(30, 299)
image = np.full((height, width, 3), value, np.uint8)
jpeg_data = coder.encode_jpeg(image)
return jpeg_data, height, width
def _process_dataset(output_directory, num_classes, coder, name, num_images,
num_shards):
"""Process a complete data set and save it as a TFRecord.
Args:
output_directory: Where to put outputs.
num_classes: number of classes.
coder: Instance of an ImageCoder.
name: string, unique identifier specifying the data set.
num_images: number of images to generate.
num_shards: integer number of shards to create.
"""
files_per_shard = num_images // num_shards
for shard in range(num_shards):
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(output_directory, output_filename)
with tf.python_io.TFRecordWriter(output_file) as writer:
for i in range(files_per_shard):
index = shard * files_per_shard + i
image_buffer, height, width = _process_image(coder, name)
filename = '{}_{}_{}'.format(name, shard, i)
label = index % num_classes
synset = str(index)
human = name
bbox = [[0.1, 0.1, 0.9, 0.9]]
example = _convert_to_example(filename, image_buffer, label,
synset, human, bbox,
height, width)
writer.write(example.SerializeToString())
def write_black_and_white_tfrecord_data(
output_directory, num_classes, num_train_images=512,
num_validation_images=128, train_shards=8, validation_shards=2):
"""Writes black and white images in tfrecord format.
Training images are black and validation images are white.
Args:
output_directory: Where to put outputs.
num_classes: number of classes.
num_train_images: number of training images to generate.
num_validation_images: number of validation images to generate.
train_shards: integer number of training shards to create.
validation_shards: integer number of validation shards to create.
"""
coder = ImageCoder()
_process_dataset(output_directory, num_classes, coder, 'validation',
num_validation_images, validation_shards)
_process_dataset(output_directory, num_classes, coder, 'train',
num_train_images, train_shards)
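# Illustrative reader sketch (not part of the original script): decode one
# record from a generated shard back into the fields documented in the module
# docstring. The shard path is whatever _process_dataset wrote, e.g.
# '<output_directory>/train-00000-of-00008'.
def read_first_record(tfrecord_path):
  """Returns (height, width, label, encoded_jpeg) of the first record."""
  record_iterator = tf.python_io.tf_record_iterator(tfrecord_path)
  example = tf.train.Example()
  example.ParseFromString(next(record_iterator))
  feature = example.features.feature
  height = feature['image/height'].int64_list.value[0]
  width = feature['image/width'].int64_list.value[0]
  label = feature['image/class/label'].int64_list.value[0]
  encoded_jpeg = feature['image/encoded'].bytes_list.value[0]
  return height, width, label, encoded_jpeg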
| {
"content_hash": "2db8079fc0a244168224ccd82792c821",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 77,
"avg_line_length": 37.785714285714285,
"alnum_prop": 0.686578449905482,
"repo_name": "annarev/benchmarks",
"id": "05b62e4f14812d850ab765bef12a53bb8ecd5cfd",
"size": "8624",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/tf_cnn_benchmarks/test_data/tfrecord_image_generator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "813002"
},
{
"name": "Shell",
"bytes": "5545"
}
],
"symlink_target": ""
} |
try:
import ujson as json
except ImportError:
import json
try:
import msgpack
except ImportError:
pass
class DtoObject(dict):
@property
def __dict__(self):
return {k: v for k, v in self.items()}
def to_json(self, **kwargs):
return json.dumps(self, **kwargs)
def to_bytes(self, **kwargs):
return msgpack.dumps(self, **kwargs)
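# Minimal usage sketch (illustrative only; the keys below are made up):
#   payload = DtoObject(summonerName="example", level=30)
#   payload.to_json()   # JSON string of the dict (via ujson when available)
#   payload.to_bytes()  # msgpack bytes; requires the optional msgpack import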
| {
"content_hash": "d2d7a92eba2a68654c63f9e406edbbf0",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 46,
"avg_line_length": 18.38095238095238,
"alnum_prop": 0.6139896373056994,
"repo_name": "sserrot/champion_relationships",
"id": "311b91836703a1d6805443e54a80e0ff7989004c",
"size": "386",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/cassiopeia/dto/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "18324224"
},
{
"name": "Jupyter Notebook",
"bytes": "9131072"
},
{
"name": "Python",
"bytes": "10702"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from datetime import timedelta
from django.utils import timezone
from sentry.models import Environment, Release, ReleaseEnvironment
from sentry.testutils import TestCase
class GetOrCreateTest(TestCase):
def test_simple(self):
project = self.create_project(name="foo")
datetime = timezone.now()
release = Release.objects.create(organization_id=project.organization_id, version="abcdef")
release.add_project(project)
env = Environment.objects.create(
project_id=project.id, organization_id=project.organization_id, name="prod"
)
relenv = ReleaseEnvironment.get_or_create(
project=project, release=release, environment=env, datetime=datetime
)
assert relenv.organization_id == project.organization_id
assert relenv.release_id == release.id
assert relenv.environment_id == env.id
datetime_new = datetime + timedelta(days=1)
relenv = ReleaseEnvironment.get_or_create(
project=project, release=release, environment=env, datetime=datetime_new
)
assert relenv.first_seen == datetime
assert relenv.last_seen == datetime_new
datetime_new2 = datetime_new + timedelta(seconds=1)
# this should not update immediately as the window is too close
relenv = ReleaseEnvironment.get_or_create(
project=project, release=release, environment=env, datetime=datetime_new2
)
assert relenv.first_seen == datetime
assert relenv.last_seen == datetime_new
# shouldn't create new release env if same env, release and org
project2 = self.create_project(name="bar", organization=project.organization)
release.add_project(project2)
relenv2 = ReleaseEnvironment.get_or_create(
project=project2, release=release, environment=env, datetime=datetime
)
assert relenv.id == relenv2.id
assert ReleaseEnvironment.objects.get(id=relenv.id).last_seen == relenv2.last_seen
| {
"content_hash": "eca9ddd63159842499670c7ccb38abe5",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 99,
"avg_line_length": 37.78181818181818,
"alnum_prop": 0.6823869104908566,
"repo_name": "beeftornado/sentry",
"id": "9e284172854fef7fd42dcb8c8ec882a7872c36a6",
"size": "2078",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/sentry/models/test_releaseenvironment.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
} |
import sys
import os
# 3p
import mock
import requests
from nose.plugins.attrib import attr
# project
from checks import AgentCheck
from tests.checks.common import AgentCheckTest
CONFIG = {
'init_config': {},
'instances': [
{
'rabbitmq_api_url': 'http://localhost:15672/api/',
'rabbitmq_user': 'guest',
'rabbitmq_pass': 'guest',
'queues': ['test1'],
}
]
}
CONFIG_REGEX = {
'init_config': {},
'instances': [
{
'rabbitmq_api_url': 'http://localhost:15672/api/',
'rabbitmq_user': 'guest',
'rabbitmq_pass': 'guest',
            'queues_regexes': [r'test\d+'],
}
]
}
CONFIG_WITH_FAMILY = {
'init_config': {},
'instances': [
{
'rabbitmq_api_url': 'http://localhost:15672/api/',
'rabbitmq_user': 'guest',
'rabbitmq_pass': 'guest',
'tag_families': True,
            'queues_regexes': [r'(test)\d+'],
}
]
}
COMMON_METRICS = [
'rabbitmq.node.fd_used',
'rabbitmq.node.mem_used',
'rabbitmq.node.run_queue',
'rabbitmq.node.sockets_used',
'rabbitmq.node.partitions'
]
Q_METRICS = [
'consumers',
'memory',
'messages',
'messages.rate',
'messages_ready',
'messages_ready.rate',
'messages_unacknowledged',
'messages_unacknowledged.rate',
'messages.publish.count',
'messages.publish.rate',
]
@attr(requires='rabbitmq')
class RabbitMQCheckTest(AgentCheckTest):
CHECK_NAME = 'rabbitmq'
def test_check(self):
self.run_check(CONFIG)
# Node attributes
for mname in COMMON_METRICS:
self.assertMetricTagPrefix(mname, 'rabbitmq_node', count=1)
self.assertMetric('rabbitmq.node.partitions', value=0, count=1)
# Queue attributes, should be only one queue fetched
# TODO: create a 'fake consumer' and get missing metrics
# active_consumers, acks, delivers, redelivers
for mname in Q_METRICS:
self.assertMetricTag('rabbitmq.queue.%s' %
mname, 'rabbitmq_queue:test1', count=1)
self.assertServiceCheckOK('rabbitmq.aliveness', tags=['vhost:/'])
self.assertServiceCheckOK('rabbitmq.status')
self.coverage_report()
def test_queue_regex(self):
self.run_check(CONFIG_REGEX)
# Node attributes
for mname in COMMON_METRICS:
self.assertMetricTagPrefix(mname, 'rabbitmq_node', count=1)
for mname in Q_METRICS:
self.assertMetricTag('rabbitmq.queue.%s' %
mname, 'rabbitmq_queue:test1', count=1)
self.assertMetricTag('rabbitmq.queue.%s' %
mname, 'rabbitmq_queue:test5', count=1)
self.assertMetricTag('rabbitmq.queue.%s' %
mname, 'rabbitmq_queue:tralala', count=0)
self.assertServiceCheckOK('rabbitmq.aliveness', tags=['vhost:/'])
self.assertServiceCheckOK('rabbitmq.status')
self.coverage_report()
def test_family_tagging(self):
self.run_check(CONFIG_WITH_FAMILY)
# Node attributes
for mname in COMMON_METRICS:
self.assertMetricTagPrefix(mname, 'rabbitmq_node', count=1)
for mname in Q_METRICS:
self.assertMetricTag('rabbitmq.queue.%s' %
mname, 'rabbitmq_queue_family:test', count=2)
self.assertServiceCheckOK('rabbitmq.aliveness', tags=['vhost:/'])
self.assertServiceCheckOK('rabbitmq.status')
self.coverage_report()
@attr(requires='rabbitmq')
class TestRabbitMQ(AgentCheckTest):
CHECK_NAME = 'rabbitmq'
@classmethod
def setUpClass(cls):
sys.path.append(os.path.abspath('.'))
@classmethod
def tearDownClass(cls):
sys.path.pop()
def test__get_data(self):
with mock.patch('check.requests') as r:
from check import RabbitMQ, RabbitMQException # pylint: disable=import-error,no-name-in-module
check = RabbitMQ('rabbitmq', {}, {"instances": [{"rabbitmq_api_url": "http://example.com"}]})
r.get.side_effect = [requests.exceptions.HTTPError, ValueError]
self.assertRaises(RabbitMQException, check._get_data, '')
self.assertRaises(RabbitMQException, check._get_data, '')
def test_status_check(self):
self.run_check({"instances": [{"rabbitmq_api_url": "http://example.com"}]})
self.assertEqual(len(self.service_checks), 1)
sc = self.service_checks[0]
self.assertEqual(sc['check'], 'rabbitmq.status')
self.assertEqual(sc['status'], AgentCheck.CRITICAL)
self.check._get_data = mock.MagicMock()
self.run_check({"instances": [{"rabbitmq_api_url": "http://example.com"}]})
self.assertEqual(len(self.service_checks), 1)
sc = self.service_checks[0]
self.assertEqual(sc['check'], 'rabbitmq.status')
self.assertEqual(sc['status'], AgentCheck.OK)
def test__check_aliveness(self):
self.load_check({"instances": [{"rabbitmq_api_url": "http://example.com"}]})
self.check._get_data = mock.MagicMock()
# only one vhost should be OK
self.check._get_data.side_effect = [{"status": "ok"}, {}]
self.check._check_aliveness('', vhosts=['foo', 'bar'])
sc = self.check.get_service_checks()
self.assertEqual(len(sc), 2)
self.assertEqual(sc[0]['check'], 'rabbitmq.aliveness')
self.assertEqual(sc[0]['status'], AgentCheck.OK)
self.assertEqual(sc[1]['check'], 'rabbitmq.aliveness')
self.assertEqual(sc[1]['status'], AgentCheck.CRITICAL)
# in case of connection errors, this check should stay silent
from check import RabbitMQException # pylint: disable=import-error,no-name-in-module
self.check._get_data.side_effect = RabbitMQException
self.assertRaises(RabbitMQException, self.check._check_aliveness, '')
| {
"content_hash": "7281625a5ee28cb52780c6eac85e5091",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 107,
"avg_line_length": 32.63243243243243,
"alnum_prop": 0.5986417094583403,
"repo_name": "darnould/integrations-core",
"id": "2c01f4c01eee1516ed21abba00cd0aeb9a0ea0e9",
"size": "6153",
"binary": false,
"copies": "1",
"ref": "refs/heads/mesos-log-recovery",
"path": "rabbitmq/test_rabbitmq.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2692"
},
{
"name": "Erlang",
"bytes": "15429"
},
{
"name": "Go",
"bytes": "1471"
},
{
"name": "Nginx",
"bytes": "1173"
},
{
"name": "Perl",
"bytes": "5845"
},
{
"name": "Python",
"bytes": "1539434"
},
{
"name": "Ruby",
"bytes": "177546"
},
{
"name": "Shell",
"bytes": "11460"
}
],
"symlink_target": ""
} |
'''
Watch files and translate the changes into salt events
:depends: - pyinotify Python module >= 0.9.5
:Caution: Using generic mask options like open, access, ignored, and
closed_nowrite with reactors can easily cause the reactor
to loop on itself. To mitigate this behavior, consider
setting the `disable_during_state_run` flag to `True` in
the beacon configuration.
'''
# Import Python libs
from __future__ import absolute_import
import collections
import fnmatch
import threading
import os
import re
import yaml
# Import salt libs
import salt.ext.six
import salt.loader
# Import third party libs
try:
import pyinotify
HAS_PYINOTIFY = True
DEFAULT_MASK = pyinotify.IN_CREATE | pyinotify.IN_DELETE | pyinotify.IN_MODIFY
MASKS = {}
for var in dir(pyinotify):
if var.startswith('IN_'):
key = var[3:].lower()
MASKS[key] = getattr(pyinotify, var)
except ImportError:
HAS_PYINOTIFY = False
DEFAULT_MASK = None
__virtualname__ = 'pulsar'
__version__ = 'v2016.10.3'
CONFIG = None
CONFIG_STALENESS = 0
import logging
log = logging.getLogger(__name__)
def __virtual__():
if salt.utils.is_windows():
return False, 'This module only works on Linux'
if HAS_PYINOTIFY:
return __virtualname__
return False
def _get_mask(mask):
'''
Return the int that represents the mask
'''
return MASKS.get(mask, 0)
def _enqueue(revent):
'''
Enqueue the event
'''
__context__['pulsar.queue'].append(revent)
def _get_notifier():
'''
Check the context for the notifier and construct it if not present
'''
if 'pulsar.notifier' not in __context__:
__context__['pulsar.queue'] = collections.deque()
wm = pyinotify.WatchManager()
__context__['pulsar.notifier'] = pyinotify.Notifier(wm, _enqueue)
return __context__['pulsar.notifier']
def beacon(config):
'''
Watch the configured files
Example pillar config
.. code-block:: yaml
beacons:
pulsar:
paths:
- /var/cache/salt/minion/files/base/hubblestack_pulsar/hubblestack_pulsar_config.yaml
refresh_interval: 300
verbose: False
Example yaml config on fileserver (targeted by pillar)
.. code-block:: yaml
/path/to/file/or/dir:
mask:
- open
- create
- close_write
recurse: True
auto_add: True
exclude:
- /path/to/file/or/dir/exclude1
- /path/to/file/or/dir/exclude2
- /path/to/file/or/dir/regex[\d]*$:
regex: True
return:
splunk:
batch: True
slack:
batch: False # overrides the global setting
checksum: sha256
stats: True
batch: True
Note that if `batch: True`, the configured returner must support receiving
a list of events, rather than single one-off events.
The mask list can contain the following events (the default mask is create,
delete, and modify):
* access - File accessed
* attrib - File metadata changed
* close_nowrite - Unwritable file closed
* close_write - Writable file closed
* create - File created in watched directory
* delete - File deleted from watched directory
* delete_self - Watched file or directory deleted
* modify - File modified
* moved_from - File moved out of watched directory
* moved_to - File moved into watched directory
* move_self - Watched file moved
* open - File opened
The mask can also contain the following options:
* dont_follow - Don't dereference symbolic links
* excl_unlink - Omit events for children after they have been unlinked
* oneshot - Remove watch after one event
* onlydir - Operate only if name is directory
recurse:
Recursively watch files in the directory
auto_add:
Automatically start watching files that are created in the watched directory
exclude:
Exclude directories or files from triggering events in the watched directory.
Can use regex if regex is set to True
If pillar/grains/minion config key `hubblestack:pulsar:maintenance` is set to
True, then changes will be discarded.
'''
global CONFIG_STALENESS
global CONFIG
if config.get('verbose'):
log.debug('Pulsar beacon called.')
log.debug('Pulsar beacon config from pillar:\n{0}'.format(config))
ret = []
notifier = _get_notifier()
wm = notifier._watch_manager
update_watches = False
# Get config(s) from salt fileserver if we don't have them already
if CONFIG and CONFIG_STALENESS < config.get('refresh_interval', 300):
CONFIG_STALENESS += 1
CONFIG.update(config)
CONFIG['verbose'] = config.get('verbose')
config = CONFIG
else:
if config.get('verbose'):
log.debug('No cached config found for pulsar, retrieving fresh from disk.')
new_config = config
if isinstance(config.get('paths'), list):
for path in config['paths']:
if 'salt://' in path:
log.error('Path {0} is not an absolute path. Please use a '
'scheduled cp.cache_file job to deliver the '
'config to the minion, then provide the '
'absolute path to the cached file on the minion '
'in the beacon config.'.format(path))
continue
if os.path.isfile(path):
with open(path, 'r') as f:
new_config = _dict_update(new_config,
yaml.safe_load(f),
recursive_update=True,
merge_lists=True)
else:
log.error('Path {0} does not exist or is not a file'.format(path))
else:
log.error('Pulsar beacon \'paths\' data improperly formatted. Should be list of paths')
new_config.update(config)
config = new_config
CONFIG_STALENESS = 0
CONFIG = config
update_watches = True
if config.get('verbose'):
log.debug('Pulsar beacon config (compiled from config list):\n{0}'.format(config))
# Read in existing events
if notifier.check_events(1):
notifier.read_events()
notifier.process_events()
queue = __context__['pulsar.queue']
if config.get('verbose'):
log.debug('Pulsar found {0} inotify events.'.format(len(queue)))
while queue:
event = queue.popleft()
if event.maskname == 'IN_Q_OVERFLOW':
log.warn('Your inotify queue is overflowing.')
log.warn('Fix by increasing /proc/sys/fs/inotify/max_queued_events')
continue
_append = True
# Find the matching path in config
path = event.path
while path != '/':
if path in config:
break
path = os.path.dirname(path)
# Get pathname
try:
pathname = event.pathname
except NameError:
pathname = path
excludes = config[path].get('exclude', '')
if excludes and isinstance(excludes, list):
for exclude in excludes:
if isinstance(exclude, dict):
if exclude.values()[0].get('regex', False):
try:
if re.search(exclude.keys()[0], event.pathname):
_append = False
except:
log.warn('Failed to compile regex: {0}'.format(exclude.keys()[0]))
pass
else:
exclude = exclude.keys()[0]
elif '*' in exclude:
if fnmatch.fnmatch(event.pathname, exclude):
_append = False
else:
if event.pathname.startswith(exclude):
_append = False
if _append:
sub = {'tag': event.path,
'path': event.pathname,
'change': event.maskname,
'name': event.name}
if config.get('checksum', False) and os.path.isfile(pathname):
sum_type = config['checksum']
if not isinstance(sum_type, salt.ext.six.string_types):
sum_type = 'sha256'
sub['checksum'] = __salt__['file.get_hash'](pathname, sum_type)
sub['checksum_type'] = sum_type
if config.get('stats', False):
sub['stats'] = __salt__['file.stats'](pathname)
ret.append(sub)
else:
log.info('Excluding {0} from event for {1}'.format(event.pathname, path))
if update_watches:
# Get paths currently being watched
current = set()
for wd in wm.watches:
current.add(wm.watches[wd].path)
# Update existing watches and add new ones
# TODO: make the config handle more options
for path in config:
if path == 'return' or path == 'checksum' or path == 'stats' \
or path == 'batch' or path == 'verbose' or path == 'paths' \
or path == 'refresh_interval':
continue
if isinstance(config[path], dict):
mask = config[path].get('mask', DEFAULT_MASK)
excludes = config[path].get('exclude', None)
if isinstance(mask, list):
r_mask = 0
for sub in mask:
r_mask |= _get_mask(sub)
elif isinstance(mask, salt.ext.six.binary_type):
r_mask = _get_mask(mask)
else:
r_mask = mask
mask = r_mask
rec = config[path].get('recurse', False)
auto_add = config[path].get('auto_add', False)
else:
mask = DEFAULT_MASK
rec = False
auto_add = False
if path in current:
for wd in wm.watches:
if path == wm.watches[wd].path:
update = False
if wm.watches[wd].mask != mask:
update = True
if wm.watches[wd].auto_add != auto_add:
update = True
if update:
wm.update_watch(wd, mask=mask, rec=rec, auto_add=auto_add)
elif os.path.exists(path):
excl = None
if isinstance(excludes, list):
excl = []
for exclude in excludes:
if isinstance(exclude, dict):
excl.append(exclude.keys()[0])
else:
excl.append(exclude)
excl = pyinotify.ExcludeFilter(excl)
wm.add_watch(path, mask, rec=rec, auto_add=auto_add, exclude_filter=excl)
# Process watch removals
to_delete = []
for wd in wm.watches:
found = False
for path in config:
if path in wm.watches[wd].path:
found = True
if not found:
to_delete.append(wd)
for wd in to_delete:
wm.del_watch(wd)
if __salt__['config.get']('hubblestack:pulsar:maintenance', False):
# We're in maintenance mode, throw away findings
ret = []
if ret and 'return' in config:
__opts__['grains'] = __grains__
__opts__['pillar'] = __pillar__
__returners__ = salt.loader.returners(__opts__, __salt__)
return_config = config['return']
if isinstance(return_config, salt.ext.six.string_types):
tmp = {}
for conf in return_config.split(','):
tmp[conf] = None
return_config = tmp
for returner_mod in return_config:
returner = '{0}.returner'.format(returner_mod)
if returner not in __returners__:
log.error('Could not find {0} returner for pulsar beacon'.format(config['return']))
return ret
batch_config = config.get('batch')
if isinstance(return_config[returner_mod], dict) and return_config[returner_mod].get('batch'):
batch_config = True
if batch_config:
transformed = []
for item in ret:
transformed.append({'return': item})
if config.get('multiprocessing_return', True):
p = threading.Thread(target=__returners__[returner], args=(transformed,))
p.daemon = True
p.start()
else:
__returners__[returner](transformed)
else:
for item in ret:
if config.get('multiprocessing_return', True):
p = threading.Thread(target=__returners__[returner], args=({'return': item},))
p.daemon = True
p.start()
else:
__returners__[returner]({'return': item})
return []
else:
# Return event data
return ret
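# For reference, a hypothetical config of the shape this beacon expects, shown
# as a Python dict (illustrative only: the watched path, exclude patterns and
# mask names are made up, and the mask strings are assumed to map onto
# pyinotify IN_* flags via _get_mask). Only keys the code above reads are used.
EXAMPLE_CONFIG = {
    '/etc': {
        'mask': ['modify', 'delete'],          # assumed mask names
        'recurse': True,
        'auto_add': True,
        'exclude': [
            '/etc/passwd.lock',                # plain prefix exclude
            {r'\.sw[po]$': {'regex': True}},   # regex exclude, per-entry flag
        ],
    },
    'checksum': 'sha256',        # attach a file hash to each event
    'stats': True,               # attach file.stats output to each event
    'refresh_interval': 300,     # beacon runs before the cached config is re-read
    'verbose': False,
}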
def _dict_update(dest, upd, recursive_update=True, merge_lists=False):
'''
Recursive version of the default dict.update
Merges upd recursively into dest
If recursive_update=False, will use the classic dict.update, or fall back
on a manual merge (helpful for non-dict types like FunctionWrapper)
If merge_lists=True, will aggregate list object types instead of replace.
This behavior is only activated when recursive_update=True. By default
merge_lists=False.
'''
if (not isinstance(dest, collections.Mapping)) \
or (not isinstance(upd, collections.Mapping)):
raise TypeError('Cannot update using non-dict types in dictupdate.update()')
updkeys = list(upd.keys())
if not set(list(dest.keys())) & set(updkeys):
recursive_update = False
if recursive_update:
for key in updkeys:
val = upd[key]
try:
dest_subkey = dest.get(key, None)
except AttributeError:
dest_subkey = None
if isinstance(dest_subkey, collections.Mapping) \
and isinstance(val, collections.Mapping):
                ret = _dict_update(dest_subkey, val, merge_lists=merge_lists)
dest[key] = ret
elif isinstance(dest_subkey, list) \
and isinstance(val, list):
if merge_lists:
dest[key] = dest.get(key, []) + val
else:
dest[key] = upd[key]
else:
dest[key] = upd[key]
return dest
else:
try:
for k in upd.keys():
dest[k] = upd[k]
except AttributeError:
# this mapping is not a dict
for k in upd:
dest[k] = upd[k]
return dest
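# A minimal usage sketch of _dict_update (illustrative only; the keys below are
# hypothetical). With merge_lists=True, list values present in both dicts are
# concatenated, while nested dicts are merged key by key.
def _example_dict_update():
    cached = {'paths': ['/var/cache/pulsar/a.yaml'],
              '/etc': {'mask': ['modify']}}
    fresh = {'paths': ['/var/cache/pulsar/b.yaml'],
             '/etc': {'recurse': True}}
    merged = _dict_update(cached, fresh, recursive_update=True, merge_lists=True)
    # merged['paths'] == ['/var/cache/pulsar/a.yaml', '/var/cache/pulsar/b.yaml']
    # merged['/etc'] == {'mask': ['modify'], 'recurse': True}
    return merged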
| {
"content_hash": "634fa5f2ac11c0f2162273830a4e4a73",
"timestamp": "",
"source": "github",
"line_count": 434,
"max_line_length": 106,
"avg_line_length": 36.463133640552996,
"alnum_prop": 0.5207582938388625,
"repo_name": "HubbleStack/Pulsar",
"id": "a7af0d1ec39cf450db474bd95e3deb5652295102",
"size": "15849",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "_beacons/pulsar.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "37294"
}
],
"symlink_target": ""
} |
from ..Module import Module
from ..TagData import TagData
class balance(Module):
opmap = {
'infData': 'descend',
'contract': 'set',
'contractUntil':'set',
}
def __init__(self, xmlns):
Module.__init__(self, xmlns)
self.name = 'balance'
def parse_status(self, response, tag):
status = tag.attrib['s']
response.set('status', status)
def parse_balance(self, response, tag):
date = tag.attrib['bdate']
response.set('date', date)
response.set('balance', tag.text)
def render_default(self, request, data):
command = self.render_command_with_fields(request, 'info', [
TagData('contract', data.get('contract'))
        ])
        return command
def render_info(self, request, data):
return self.render_default(request, data)
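# Illustrative sketch (not part of heppy): what parse_balance extracts from an
# EPP-style response element. _FakeResponse is a hypothetical stand-in for the
# real response object, assumed only to expose set(key, value).
def _example_parse_balance():
    from xml.etree import ElementTree
    class _FakeResponse(object):
        def __init__(self):
            self.data = {}
        def set(self, key, value):
            self.data[key] = value
    tag = ElementTree.fromstring('<balance bdate="2023-01-31">1250.00</balance>')
    response = _FakeResponse()
    response.set('date', tag.attrib['bdate'])   # mirrors parse_balance above
    response.set('balance', tag.text)
    return response.data   # {'date': '2023-01-31', 'balance': '1250.00'}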
| {
"content_hash": "7f2b1204d891eb771b08a703bf02eeb0",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 68,
"avg_line_length": 27.258064516129032,
"alnum_prop": 0.5775147928994083,
"repo_name": "hiqdev/reppy",
"id": "c244e4fb52c53ada6fecf5f54623b85b0a191a25",
"size": "845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heppy/modules/balance.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "36339"
}
],
"symlink_target": ""
} |
"""
API for OGC Filter Encoding (FE) constructs and metadata.
Filter Encoding: http://www.opengeospatial.org/standards/filter
Supports version 2.0.2 (09-026r2).
"""
from owslib.etree import etree
from owslib import util
from owslib.namespaces import Namespaces
from abc import ABCMeta, abstractmethod
# default variables
def get_namespaces():
n = Namespaces()
ns = n.get_namespaces(["dif", "fes", "gml", "ogc", "ows110", "xs", "xsi"])
ns[None] = n.get_namespace("fes")
return ns
namespaces = get_namespaces()
schema = 'http://schemas.opengis.net/filter/2.0/filterAll.xsd'
schema_location = '%s %s' % (namespaces['fes'], schema)
class FilterRequest(object):
""" filter class """
def __init__(self, parent=None, version='2.0.0'):
"""
filter Constructor
Parameters
----------
- parent: parent etree.Element object (default is None)
- version: version (default is '2.0.0')
"""
self.version = version
self._root = etree.Element(util.nspath_eval('fes:Filter', namespaces))
if parent is not None:
self._root.set(util.nspath_eval('xsi:schemaLocation', namespaces), schema_location)
def set(self, parent=False, qtype=None, keywords=[], typenames='csw:Record', propertyname='csw:AnyText', bbox=None,
identifier=None):
"""
Construct and process a GetRecords request
Parameters
----------
        - parent: the parent Element object. If this is not given, then generate a standalone request
- qtype: type of resource to query (i.e. service, dataset)
- keywords: list of keywords
- propertyname: the ValueReference to Filter against
- bbox: the bounding box of the spatial query in the form [minx,miny,maxx,maxy]
- identifier: the dc:identifier to query against with a PropertyIsEqualTo. Ignores all other inputs.
"""
# Set the identifier if passed. Ignore other parameters
dc_identifier_equals_filter = None
if identifier is not None:
dc_identifier_equals_filter = PropertyIsEqualTo('dc:identifier', identifier)
self._root.append(dc_identifier_equals_filter.toXML())
return self._root
# Set the query type if passed
dc_type_equals_filter = None
if qtype is not None:
dc_type_equals_filter = PropertyIsEqualTo('dc:type', qtype)
# Set a bbox query if passed
bbox_filter = None
if bbox is not None:
bbox_filter = BBox(bbox)
# Set a keyword query if passed
keyword_filter = None
if len(keywords) > 0:
if len(keywords) > 1: # loop multiple keywords into an Or
ks = []
for i in keywords:
ks.append(PropertyIsLike(propertyname, "*%s*" % i, wildCard="*"))
keyword_filter = Or(operations=ks)
elif len(keywords) == 1: # one keyword
keyword_filter = PropertyIsLike(propertyname, "*%s*" % keywords[0], wildCard="*")
# And together filters if more than one exists
filters = [_f for _f in [keyword_filter, bbox_filter, dc_type_equals_filter] if _f]
if len(filters) == 1:
self._root.append(filters[0].toXML())
elif len(filters) > 1:
self._root.append(And(operations=filters).toXML())
return self._root
def setConstraint(self, constraint, tostring=False):
"""
Construct and process a GetRecords request
Parameters
----------
- constraint: An OgcExpression object
- tostring (optional): return as string
"""
self._root.append(constraint.toXML())
if tostring:
return util.element_to_string(self._root, xml_declaration=False)
return self._root
def setConstraintList(self, constraints, tostring=False):
"""
Construct and process a GetRecords request
Parameters
----------
- constraints: A list of OgcExpression objects
        The list is interpreted like so:
[a,b,c]
a || b || c
[[a,b,c]]
a && b && c
[[a,b],[c],[d],[e]] or [[a,b],c,d,e]
(a && b) || c || d || e
- tostring (optional): return as string
"""
ors = []
if len(constraints) == 1:
if isinstance(constraints[0], OgcExpression):
flt = self.setConstraint(constraints[0])
else:
self._root.append(And(operations=constraints[0]).toXML())
flt = self._root
if tostring:
return util.element_to_string(flt, xml_declaration=False)
else:
return flt
for c in constraints:
if isinstance(c, OgcExpression):
ors.append(c)
elif isinstance(c, list) or isinstance(c, tuple):
if len(c) == 1:
ors.append(c[0])
elif len(c) >= 2:
ands = []
for sub in c:
if isinstance(sub, OgcExpression):
ands.append(sub)
ors.append(And(operations=ands))
self._root.append(Or(operations=ors).toXML())
if tostring:
return util.element_to_string(self._root, xml_declaration=False)
return self._root
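# Example sketch (not part of the OWSLib API): building the constraint-list
# structure described in setConstraintList's docstring. The property names and
# bounding box are arbitrary; [[a, b], c] is rendered as (a AND b) OR c.
def _example_constraint_list():
    ocean = PropertyIsLike('csw:AnyText', '*ocean*', wildCard='*')
    dataset = PropertyIsEqualTo('dc:type', 'dataset')
    box = BBox([-180, -90, 180, 90])
    fr = FilterRequest()
    xml = fr.setConstraintList([[ocean, dataset], box], tostring=True)
    # xml is a fes:Filter wrapping fes:Or( fes:And(ocean, dataset), fes:BBOX )
    return xml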
class FilterCapabilities(object):
"""Abstraction for Filter_Capabilities 2.0"""
def __init__(self, elem):
if elem is None:
self.spatial_operands = []
self.spatial_operators = []
self.temporal_operators = []
self.temporal_operands = []
self.scalar_comparison_operators = []
self.conformance = {}
return
# Spatial_Capabilities
self.spatial_operands = [f.attrib.get('name') for f in elem.findall(util.nspath_eval(
'fes:Spatial_Capabilities/fes:GeometryOperands/fes:GeometryOperand', namespaces))]
self.spatial_operators = []
for f in elem.findall(util.nspath_eval(
'fes:Spatial_Capabilities/fes:SpatialOperators/fes:SpatialOperator', namespaces)):
self.spatial_operators.append(f.attrib['name'])
# Temporal_Capabilities
self.temporal_operands = [f.attrib.get('name') for f in elem.findall(util.nspath_eval(
'fes:Temporal_Capabilities/fes:TemporalOperands/fes:TemporalOperand', namespaces))]
self.temporal_operators = []
for f in elem.findall(util.nspath_eval(
'fes:Temporal_Capabilities/fes:TemporalOperators/fes:TemporalOperator', namespaces)):
self.temporal_operators.append(f.attrib['name'])
# Scalar_Capabilities
self.scalar_comparison_operators = [f.text for f in elem.findall(util.nspath_eval(
'fes:Scalar_Capabilities/fes:ComparisonOperators/fes:ComparisonOperator', namespaces))]
# Conformance
self.conformance = {}
for f in elem.findall(util.nspath_eval('fes:Conformance/fes:Constraint', namespaces)):
self.conformance[f.attrib.get('name')] = f.find(util.nspath_eval('ows110:DefaultValue', namespaces)).text
def setsortby(parent, propertyname, order='ASC'):
"""
constructs a SortBy element
Parameters
----------
- parent: parent etree.Element object
- propertyname: the ValueReference
- order: the SortOrder (default is 'ASC')
"""
tmp = etree.SubElement(parent, util.nspath_eval('fes:SortBy', namespaces))
tmp2 = etree.SubElement(tmp, util.nspath_eval('fes:SortProperty', namespaces))
etree.SubElement(tmp2, util.nspath_eval('fes:ValueReference', namespaces)).text = propertyname
etree.SubElement(tmp2, util.nspath_eval('fes:SortOrder', namespaces)).text = order
class SortProperty(object):
def __init__(self, propertyname, order='ASC'):
self.propertyname = propertyname
self.order = order.upper()
if self.order not in ['DESC', 'ASC']:
raise ValueError("SortOrder can only be 'ASC' or 'DESC'")
def toXML(self):
node0 = etree.Element(util.nspath_eval("fes:SortProperty", namespaces))
etree.SubElement(node0, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
etree.SubElement(node0, util.nspath_eval('fes:SortOrder', namespaces)).text = self.order
return node0
class SortBy(object):
def __init__(self, properties):
self.properties = properties
def toXML(self):
node0 = etree.Element(util.nspath_eval("fes:SortBy", namespaces))
for prop in self.properties:
node0.append(prop.toXML())
return node0
class OgcExpression(object):
def __init__(self):
pass
class BinaryComparisonOpType(OgcExpression):
""" Super class of all the property operation classes"""
def __init__(self, propertyoperator, propertyname, literal, matchcase=True):
self.propertyoperator = propertyoperator
self.propertyname = propertyname
self.literal = literal
self.matchcase = matchcase
def toXML(self):
node0 = etree.Element(util.nspath_eval(self.propertyoperator, namespaces))
if not self.matchcase:
node0.set('matchCase', 'false')
etree.SubElement(node0, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
etree.SubElement(node0, util.nspath_eval('fes:Literal', namespaces)).text = self.literal
return node0
class PropertyIsEqualTo(BinaryComparisonOpType):
""" PropertyIsEqualTo class"""
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'fes:PropertyIsEqualTo', propertyname, literal, matchcase)
class PropertyIsNotEqualTo(BinaryComparisonOpType):
""" PropertyIsNotEqualTo class """
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'fes:PropertyIsNotEqualTo', propertyname, literal, matchcase)
class PropertyIsLessThan(BinaryComparisonOpType):
"""PropertyIsLessThan class"""
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'fes:PropertyIsLessThan', propertyname, literal, matchcase)
class PropertyIsGreaterThan(BinaryComparisonOpType):
"""PropertyIsGreaterThan class"""
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'fes:PropertyIsGreaterThan', propertyname, literal, matchcase)
class PropertyIsLessThanOrEqualTo(BinaryComparisonOpType):
"""PropertyIsLessThanOrEqualTo class"""
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'fes:PropertyIsLessThanOrEqualTo', propertyname, literal, matchcase)
class PropertyIsGreaterThanOrEqualTo(BinaryComparisonOpType):
"""PropertyIsGreaterThanOrEqualTo class"""
def __init__(self, propertyname, literal, matchcase=True):
BinaryComparisonOpType.__init__(self, 'fes:PropertyIsGreaterThanOrEqualTo', propertyname, literal, matchcase)
class PropertyIsLike(OgcExpression):
"""PropertyIsLike class"""
def __init__(self, propertyname, literal, escapeChar='\\', singleChar='_', wildCard='%', matchCase=True):
self.propertyname = propertyname
self.literal = literal
self.escapeChar = escapeChar
self.singleChar = singleChar
self.wildCard = wildCard
self.matchCase = matchCase
def toXML(self):
node0 = etree.Element(util.nspath_eval('fes:PropertyIsLike', namespaces))
node0.set('wildCard', self.wildCard)
node0.set('singleChar', self.singleChar)
node0.set('escapeChar', self.escapeChar)
if not self.matchCase:
node0.set('matchCase', 'false')
etree.SubElement(node0, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
etree.SubElement(node0, util.nspath_eval('fes:Literal', namespaces)).text = self.literal
return node0
class PropertyIsNull(OgcExpression):
"""PropertyIsNull class"""
def __init__(self, propertyname):
self.propertyname = propertyname
def toXML(self):
node0 = etree.Element(util.nspath_eval('fes:PropertyIsNull', namespaces))
etree.SubElement(node0, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
return node0
class PropertyIsBetween(OgcExpression):
"""PropertyIsBetween class"""
def __init__(self, propertyname, lower, upper):
self.propertyname = propertyname
self.lower = lower
self.upper = upper
def toXML(self):
node0 = etree.Element(util.nspath_eval('fes:PropertyIsBetween', namespaces))
etree.SubElement(node0, util.nspath_eval('fes:ValueReference', namespaces)).text = self.propertyname
node1 = etree.SubElement(node0, util.nspath_eval('fes:LowerBoundary', namespaces))
etree.SubElement(node1, util.nspath_eval('fes:Literal', namespaces)).text = '%s' % self.lower
node2 = etree.SubElement(node0, util.nspath_eval('fes:UpperBoundary', namespaces))
etree.SubElement(node2, util.nspath_eval('fes:Literal', namespaces)).text = '%s' % self.upper
return node0
class BBox(OgcExpression):
"""Construct a BBox, two pairs of coordinates (west-south and east-north)"""
def __init__(self, bbox, crs=None):
self.bbox = bbox
self.crs = crs
def toXML(self):
tmp = etree.Element(util.nspath_eval('fes:BBOX', namespaces))
etree.SubElement(tmp, util.nspath_eval('fes:ValueReference', namespaces)).text = 'ows:BoundingBox'
tmp2 = etree.SubElement(tmp, util.nspath_eval('gml:Envelope', namespaces))
if self.crs is not None:
tmp2.set('srsName', self.crs)
etree.SubElement(tmp2, util.nspath_eval('gml:lowerCorner', namespaces)).text = '{} {}'.format(
self.bbox[0], self.bbox[1])
etree.SubElement(tmp2, util.nspath_eval('gml:upperCorner', namespaces)).text = '{} {}'.format(
self.bbox[2], self.bbox[3])
return tmp
class Filter(OgcExpression):
def __init__(self, filter):
self.filter = filter
def toXML(self):
node = etree.Element(util.nspath_eval("fes:Filter", namespaces))
node.append(self.filter.toXML())
return node
class TopologicalOpType(OgcExpression, metaclass=ABCMeta):
"""Abstract base class for topological operators."""
@property
@abstractmethod
def operation(self):
"""This is a mechanism to ensure this class is subclassed by an actual operation."""
pass
def __init__(self, propertyname, geometry):
self.propertyname = propertyname
self.geometry = geometry
def toXML(self):
node = etree.Element(util.nspath_eval(f"fes:{self.operation}", namespaces))
etree.SubElement(node, util.nspath_eval("fes:ValueReference", namespaces)).text = self.propertyname
node.append(self.geometry.toXML())
return node
class Intersects(TopologicalOpType):
operation = "Intersects"
class Contains(TopologicalOpType):
operation = "Contains"
class Disjoint(TopologicalOpType):
operation = "Disjoint"
class Within(TopologicalOpType):
operation = "Within"
class Touches(TopologicalOpType):
operation = "Touches"
class Overlaps(TopologicalOpType):
operation = "Overlaps"
class Equals(TopologicalOpType):
operation = "Equals"
# BINARY
class BinaryLogicOpType(OgcExpression):
""" Binary Operators: And / Or """
def __init__(self, binary_operator, operations):
self.binary_operator = binary_operator
try:
assert len(operations) >= 2
self.operations = operations
except Exception:
raise ValueError("Binary operations (And / Or) require a minimum of two operations to operate against")
def toXML(self):
node0 = etree.Element(util.nspath_eval(self.binary_operator, namespaces))
for op in self.operations:
node0.append(op.toXML())
return node0
class And(BinaryLogicOpType):
def __init__(self, operations):
super(And, self).__init__('fes:And', operations)
class Or(BinaryLogicOpType):
def __init__(self, operations):
super(Or, self).__init__('fes:Or', operations)
# UNARY
class UnaryLogicOpType(OgcExpression):
""" Unary Operator: Not """
def __init__(self, unary_operator, operations):
self.unary_operator = unary_operator
self.operations = operations
def toXML(self):
node0 = etree.Element(util.nspath_eval(self.unary_operator, namespaces))
for op in self.operations:
node0.append(op.toXML())
return node0
class Not(UnaryLogicOpType):
def __init__(self, operations):
super(Not, self).__init__('fes:Not', operations)
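# Small usage sketch of the sorting and negation helpers above (the property
# names are illustrative only).
def _example_sort_and_not():
    sort = SortBy([SortProperty('dc:title', 'ASC'),
                   SortProperty('dc:date', 'DESC')])
    not_null = Not([PropertyIsNull('dc:title')])
    # Both render to fes:* elements that can be attached to a request document.
    return sort.toXML(), not_null.toXML()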
| {
"content_hash": "f9efb15b551320c34c250c098ccad55c",
"timestamp": "",
"source": "github",
"line_count": 488,
"max_line_length": 119,
"avg_line_length": 35.14139344262295,
"alnum_prop": 0.6330981398332264,
"repo_name": "geopython/OWSLib",
"id": "795104fd451102ab74a6f058c4e2dc425a73fbc9",
"size": "17466",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "owslib/fes2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "4639"
},
{
"name": "Makefile",
"bytes": "67"
},
{
"name": "Python",
"bytes": "1158395"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from mc2.controllers.base import views
urlpatterns = patterns(
'',
url(
r'^add/$',
views.ControllerCreateView.as_view(),
name='add'
),
url(
r'^(?P<controller_pk>\d+)/$',
views.ControllerEditView.as_view(),
name='edit'),
url(
r'^restart/(?P<controller_pk>\d+)/$',
views.ControllerRestartView.as_view(),
name='restart'),
url(
r'^delete/(?P<controller_pk>\d+)/$',
views.ControllerDeleteView.as_view(),
name='delete'),
url(
r'^(?P<controller_pk>\d+)/clone/$',
views.ControllerCloneView.as_view(),
name='clone'),
url(
r'^logs/(?P<controller_pk>\d+)/$',
views.AppLogView.as_view(),
name='logs'),
url(
r'^logs/(?P<controller_pk>\d+)/(?P<task_id>[\w\.\-]+)/(?P<path>(stderr|stdout))/$', # noqa
views.MesosFileLogView.as_view(), name='mesos_file_log_view'),
url(
r'^exists/(?P<controller_pk>\d+)/$',
views.update_marathon_exists_json,
name='update_marathon_exists_json'),
url(
r'^restarthook/(?P<controller_pk>\d+)/(?P<token>[\w-]+)/$',
views.ControllerWebhookRestartView.as_view(),
name='webhook_restart'),
)
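# Quick illustration (not used by the app) of what the webhook_restart pattern
# captures; the controller id and token are made up.
def _example_webhook_restart_match():
    import re
    m = re.match(r'^restarthook/(?P<controller_pk>\d+)/(?P<token>[\w-]+)/$',
                 'restarthook/42/a1b2-c3d4/')
    return m.groupdict()   # {'controller_pk': '42', 'token': 'a1b2-c3d4'}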
| {
"content_hash": "0625fa4ddac1eb2818114064800c7e0f",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 99,
"avg_line_length": 29.545454545454547,
"alnum_prop": 0.5423076923076923,
"repo_name": "praekelt/mc2",
"id": "21a39e5b39daebfd153c72549d3821cb8f748927",
"size": "1300",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "mc2/controllers/base/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "387033"
},
{
"name": "HTML",
"bytes": "1768863"
},
{
"name": "JavaScript",
"bytes": "2133408"
},
{
"name": "PHP",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "281544"
},
{
"name": "Shell",
"bytes": "1163"
}
],
"symlink_target": ""
} |
import netaddr
import six
from sqlalchemy import func
import neutron.db.models_v2 as mod
NETWORK_ID = 'network_id'
NETWORK_NAME = 'network_name'
SUBNET_ID = 'subnet_id'
SUBNET_NAME = 'subnet_name'
SUPPORTED_FILTERS = {
NETWORK_ID: mod.Network.id,
NETWORK_NAME: mod.Network.name,
'tenant_id': mod.Network.tenant_id,
'ip_version': mod.Subnet.ip_version,
}
SUPPORTED_FILTER_KEYS = six.viewkeys(SUPPORTED_FILTERS)
class IpAvailabilityMixin(object):
"""Mixin class to query for IP availability."""
# Columns common to all queries
common_columns = [
mod.Network.id.label(NETWORK_ID),
mod.Subnet.id.label(SUBNET_ID),
mod.Subnet.cidr,
mod.Subnet.ip_version
]
# Columns for the network/subnet and used_ip counts
network_used_ips_columns = list(common_columns)
network_used_ips_columns.append(mod.Network.name.label(NETWORK_NAME))
network_used_ips_columns.append(mod.Network.tenant_id)
network_used_ips_columns.append(mod.Subnet.name.label(SUBNET_NAME))
# Aggregate query computed column
network_used_ips_computed_columns = [
func.count(mod.IPAllocation.subnet_id).label('used_ips')]
# Columns for total_ips query
total_ips_columns = list(common_columns)
total_ips_columns.append(mod.IPAllocationPool.first_ip)
total_ips_columns.append(mod.IPAllocationPool.last_ip)
@classmethod
def get_network_ip_availabilities(cls, context, filters=None):
"""Get IP availability stats on a per subnet basis.
        Returns a list of network summaries, each of which contains a list
        of subnet summaries. The used_ip and total_ip counts are returned
        at both levels.
"""
# Fetch total_ips by subnet
subnet_total_ips_dict = cls._generate_subnet_total_ips_dict(context,
filters)
# Query network/subnet data along with used IP counts
record_and_count_query = cls._build_network_used_ip_query(context,
filters)
# Assemble results
result_dict = {}
for row in record_and_count_query:
cls._add_result(row, result_dict,
subnet_total_ips_dict.get(row.subnet_id, 0))
# Convert result back into the list it expects
net_ip_availabilities = list(six.viewvalues(result_dict))
return net_ip_availabilities
@classmethod
def _build_network_used_ip_query(cls, context, filters):
# Generate a query to gather network/subnet/used_ips.
# Ensure query is tolerant of missing child table data (outerjoins)
# Process these outerjoin columns assuming their values may be None
query = context.session.query()
query = query.add_columns(*cls.network_used_ips_columns)
query = query.add_columns(*cls.network_used_ips_computed_columns)
query = query.outerjoin(mod.Subnet,
mod.Network.id == mod.Subnet.network_id)
query = query.outerjoin(mod.IPAllocation,
mod.Subnet.id == mod.IPAllocation.subnet_id)
query = query.group_by(*cls.network_used_ips_columns)
return cls._adjust_query_for_filters(query, filters)
@classmethod
def _build_total_ips_query(cls, context, filters):
query = context.session.query()
query = query.add_columns(*cls.total_ips_columns)
query = query.outerjoin(mod.Subnet,
mod.Network.id == mod.Subnet.network_id)
query = query.outerjoin(
mod.IPAllocationPool,
mod.Subnet.id == mod.IPAllocationPool.subnet_id)
return cls._adjust_query_for_filters(query, filters)
@classmethod
def _generate_subnet_total_ips_dict(cls, context, filters):
"""Generates a dict whose key=subnet_id, value=total_ips in subnet"""
# Query to get total_ips counts
total_ips_query = cls._build_total_ips_query(context, filters)
subnet_totals_dict = {}
for row in total_ips_query:
# Skip networks without subnets
if not row.subnet_id:
continue
# Add IPAllocationPool data
if row.last_ip:
pool_total = netaddr.IPRange(
netaddr.IPAddress(row.first_ip),
netaddr.IPAddress(row.last_ip)).size
cur_total = subnet_totals_dict.get(row.subnet_id, 0)
subnet_totals_dict[row.subnet_id] = cur_total + pool_total
else:
subnet_totals_dict[row.subnet_id] = netaddr.IPNetwork(
row.cidr, version=row.ip_version).size
return subnet_totals_dict
@classmethod
def _adjust_query_for_filters(cls, query, filters):
# The intersect of sets gets us applicable filter keys (others ignored)
common_keys = six.viewkeys(filters) & SUPPORTED_FILTER_KEYS
for key in common_keys:
filter_vals = filters[key]
if filter_vals:
query = query.filter(SUPPORTED_FILTERS[key].in_(filter_vals))
return query
@classmethod
def _add_result(cls, db_row, result_dict, subnet_total_ips):
# Find network in results. Create and add if missing
if db_row.network_id in result_dict:
network = result_dict[db_row.network_id]
else:
network = {NETWORK_ID: db_row.network_id,
NETWORK_NAME: db_row.network_name,
'tenant_id': db_row.tenant_id,
'subnet_ip_availability': [],
'used_ips': 0, 'total_ips': 0}
result_dict[db_row.network_id] = network
# Only add subnet data if outerjoin rows have it
if db_row.subnet_id:
cls._add_subnet_data_to_net(db_row, network, subnet_total_ips)
@classmethod
def _add_subnet_data_to_net(cls, db_row, network_dict, subnet_total_ips):
subnet = {
SUBNET_ID: db_row.subnet_id,
'ip_version': db_row.ip_version,
'cidr': db_row.cidr,
SUBNET_NAME: db_row.subnet_name,
'used_ips': db_row.used_ips if db_row.used_ips else 0,
'total_ips': subnet_total_ips
}
# Attach subnet result and rollup subnet sums into the parent
network_dict['subnet_ip_availability'].append(subnet)
network_dict['total_ips'] += subnet['total_ips']
network_dict['used_ips'] += subnet['used_ips']
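# Sketch of the two counting paths used by _generate_subnet_total_ips_dict,
# with made-up addresses (netaddr is imported at the top of this module).
def _example_total_ip_counts():
    # Allocation-pool based count: the inclusive size of the address range.
    pool_total = netaddr.IPRange(netaddr.IPAddress('10.0.0.2'),
                                 netaddr.IPAddress('10.0.0.254')).size   # 253
    # CIDR based count, used when a subnet has no allocation pool rows.
    cidr_total = netaddr.IPNetwork('10.0.0.0/24', version=4).size        # 256
    return pool_total, cidr_total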
| {
"content_hash": "dd7bd076e65bdd33086beea208ff0424",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 79,
"avg_line_length": 40.41463414634146,
"alnum_prop": 0.6071213035606517,
"repo_name": "MaximNevrov/neutron",
"id": "4855be2c9b2f093104ef4c4b714fb6e9ccfacab1",
"size": "7212",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "neutron/db/network_ip_availability_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "8742123"
},
{
"name": "Shell",
"bytes": "14781"
}
],
"symlink_target": ""
} |
"""Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically
'FW' : Floyd-Warshall algorithm
'D' : Dijkstra algorithm with Fibonacci Heaps
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
`embedding_` : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
`kernel_pca_` : object
`KernelPCA` object used to implement the embedding.
`training_data_` : array-like, shape (n_samples, n_features)
Stores the training data.
`nbrs_` : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
`dist_matrix_` : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
[1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
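# Minimal usage sketch (illustrative only): embed a small random point cloud
# and query the reconstruction error defined above.
def _example_isomap_usage():
    rng = np.random.RandomState(0)
    X = rng.rand(100, 5)                        # 100 samples, 5 features
    iso = Isomap(n_neighbors=10, n_components=2)
    X_2d = iso.fit_transform(X)                 # shape (100, 2)
    err = iso.reconstruction_error()            # scalar, see docstring above
    return X_2d, err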
| {
"content_hash": "a324f6e402390ba0b3fa57dd4942f02c",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 79,
"avg_line_length": 35.029411764705884,
"alnum_prop": 0.5917996081724042,
"repo_name": "eickenberg/scikit-learn",
"id": "3354c36db8447bb599e33a4ca62ed40b56ea73cc",
"size": "7146",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/manifold/isomap.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "18332579"
},
{
"name": "C++",
"bytes": "1807769"
},
{
"name": "CSS",
"bytes": "0"
},
{
"name": "JavaScript",
"bytes": "22292"
},
{
"name": "PowerShell",
"bytes": "13064"
},
{
"name": "Python",
"bytes": "5279930"
},
{
"name": "Shell",
"bytes": "8108"
}
],
"symlink_target": ""
} |
import Gaffer
## The CompoundPathFilter class simply combines a number of other
# PathFilters, applying them in sequence.
class CompoundPathFilter( Gaffer.PathFilter ) :
def __init__( self, filters=[], userData={} ) :
Gaffer.PathFilter.__init__( self, userData )
self.__filters = None
self.__changedConnections = []
self.setFilters( filters )
def addFilter( self, filter ) :
assert( filter not in self.__filters )
self.__filters.append( filter )
self.__changedConnections.append( filter.changedSignal().connect( Gaffer.WeakMethod( self.__filterChanged ) ) )
if self.getEnabled() :
self.changedSignal()( self )
def removeFilter( self, filter ) :
index = self.__filters.index( filter )
del self.__filters[index]
del self.__changedConnections[index]
if self.getEnabled() :
self.changedSignal()( self )
def setFilters( self, filters ) :
assert( type( filters ) is list )
if filters == self.__filters :
return
# copy list so it can't be changed behind our back
self.__filters = list( filters )
# update changed connections
self.__changedConnections = [ f.changedSignal().connect( Gaffer.WeakMethod( self.__filterChanged ) ) for f in self.__filters ]
if self.getEnabled() :
self.changedSignal()( self )
def getFilters( self ) :
# return a copy so the list can't be changed behind our back
return list( self.__filters )
def _filter( self, paths ) :
for f in self.__filters :
paths = f.filter( paths )
return paths
def __filterChanged( self, childFilter ) :
assert( childFilter in self.__filters )
if self.getEnabled() :
self.changedSignal()( self ) | {
"content_hash": "92cac9422f1713bf05791a4463296527",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 128,
"avg_line_length": 25.348484848484848,
"alnum_prop": 0.6700537955768081,
"repo_name": "davidsminor/gaffer",
"id": "e70897011e4a1d277a67765cc5a7ed6e6a8af773",
"size": "3490",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "python/Gaffer/CompoundPathFilter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9286"
},
{
"name": "C++",
"bytes": "3358250"
},
{
"name": "COBOL",
"bytes": "64449"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "Python",
"bytes": "3267354"
},
{
"name": "Shell",
"bytes": "7055"
},
{
"name": "Slash",
"bytes": "35200"
}
],
"symlink_target": ""
} |
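A brief usage sketch for the CompoundPathFilter above. The _ExtensionFilter class is hypothetical: it only assumes the Gaffer.PathFilter subclassing pattern used by CompoundPathFilter itself (a constructor taking userData and a _filter( paths ) override), that Path objects stringify to their path, and that the class is exposed as Gaffer.CompoundPathFilter.

import Gaffer
class _ExtensionFilter( Gaffer.PathFilter ) :
    def __init__( self, extension, userData={} ) :
        Gaffer.PathFilter.__init__( self, userData )
        self.__extension = extension
    def _filter( self, paths ) :
        return [ p for p in paths if str( p ).endswith( self.__extension ) ]
# Child filters are applied in sequence; each sees the output of the previous.
compound = Gaffer.CompoundPathFilter( [ _ExtensionFilter( ".exr" ) ] )
compound.addFilter( _ExtensionFilter( ".0001.exr" ) )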
from ...Helpers.commands import Nop
def skip_statement(commands, data, node):
""" Компиляция оператора пропуска команды """
commands.add(Nop)
| {
"content_hash": "a5e0251cf373e75e6f97ac99d0aee13f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 49,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.7105263157894737,
"repo_name": "PetukhovVictor/compiler",
"id": "ae690d7321a0373a0cc07f00abdf3bdf29114af5",
"size": "211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Compiler/VM/Codegen/statements/skip.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "16410"
},
{
"name": "Python",
"bytes": "239647"
},
{
"name": "Shell",
"bytes": "109"
}
],
"symlink_target": ""
} |
r"""
Punkt Sentence Tokenizer
This tokenizer divides a text into a list of sentences,
by using an unsupervised algorithm to build a model for abbreviation
words, collocations, and words that start sentences. It must be
trained on a large collection of plaintext in the target language
before it can be used.
The NLTK data package includes a pre-trained Punkt tokenizer for
English.
>>> import nltk.data
>>> text = '''
... Punkt knows that the periods in Mr. Smith and Johann S. Bach
... do not mark sentence boundaries. And sometimes sentences
... can start with non-capitalized words. i is a good variable
... name.
... '''
>>> sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
>>> print '\n-----\n'.join(sent_detector.tokenize(text.strip()))
Punkt knows that the periods in Mr. Smith and Johann S. Bach
do not mark sentence boundaries.
-----
And sometimes sentences
can start with non-capitalized words.
-----
i is a good variable
name.
(Note that whitespace from the original text, including newlines, is
retained in the output.)
Punctuation following sentences can be included with the realign_boundaries
flag:
>>> text = '''
... (How does it deal with this parenthesis?) "It should be part of the
... previous sentence."
... '''
>>> print '\n-----\n'.join(
... sent_detector.tokenize(text.strip(), realign_boundaries=True))
(How does it deal with this parenthesis?)
-----
"It should be part of the
previous sentence."
However, Punkt is designed to learn parameters (a list of abbreviations, etc.)
unsupervised from a corpus similar to the target domain. The pre-packaged models
may therefore be unsuitable: use ``PunktSentenceTokenizer(text)`` to learn
parameters from the given text.
:class:`.PunktTrainer` learns parameters such as a list of abbreviations
(without supervision) from portions of text. Using a ``PunktTrainer`` directly
allows for incremental training and modification of the hyper-parameters used
to decide what is considered an abbreviation, etc.
:class:`.PunktWordTokenizer` uses a regular expression to divide a text into tokens,
leaving all periods attached to words, but separating off other punctuation:
>>> from nltk.tokenize.punkt import PunktWordTokenizer
>>> s = "Good muffins cost $3.88\nin New York. Please buy me\ntwo of them.\n\nThanks."
>>> PunktWordTokenizer().tokenize(s)
['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.', 'Please',
'buy', 'me', 'two', 'of', 'them.', 'Thanks.']
The algorithm for this tokenizer is described in::
Kiss, Tibor and Strunk, Jan (2006): Unsupervised Multilingual Sentence
Boundary Detection. Computational Linguistics 32: 485-525.
"""
# TODO: Make orthographic heuristic less susceptible to overtraining
# TODO: Frequent sentence starters optionally exclude always-capitalised words
# FIXME: Problem with ending string with e.g. '!!!' -> '!! !'
from __future__ import print_function
import re
import math
from collections import defaultdict
from nltk.probability import FreqDist
from nltk.tokenize.api import TokenizerI
######################################################################
#{ Orthographic Context Constants
######################################################################
# The following constants are used to describe the orthographic
# contexts in which a word can occur. BEG=beginning, MID=middle,
# UNK=unknown, UC=uppercase, LC=lowercase, NC=no case.
_ORTHO_BEG_UC = 1 << 1
"""Orthographic context: beginning of a sentence with upper case."""
_ORTHO_MID_UC = 1 << 2
"""Orthographic context: middle of a sentence with upper case."""
_ORTHO_UNK_UC = 1 << 3
"""Orthographic context: unknown position in a sentence with upper case."""
_ORTHO_BEG_LC = 1 << 4
"""Orthographic context: beginning of a sentence with lower case."""
_ORTHO_MID_LC = 1 << 5
"""Orthographic context: middle of a sentence with lower case."""
_ORTHO_UNK_LC = 1 << 6
"""Orthographic context: unknown position in a sentence with lower case."""
_ORTHO_UC = _ORTHO_BEG_UC + _ORTHO_MID_UC + _ORTHO_UNK_UC
"""Orthographic context: occurs with upper case."""
_ORTHO_LC = _ORTHO_BEG_LC + _ORTHO_MID_LC + _ORTHO_UNK_LC
"""Orthographic context: occurs with lower case."""
_ORTHO_MAP = {
('initial', 'upper'): _ORTHO_BEG_UC,
('internal', 'upper'): _ORTHO_MID_UC,
('unknown', 'upper'): _ORTHO_UNK_UC,
('initial', 'lower'): _ORTHO_BEG_LC,
('internal', 'lower'): _ORTHO_MID_LC,
('unknown', 'lower'): _ORTHO_UNK_LC,
}
"""A map from context position and first-letter case to the
appropriate orthographic context flag."""
#} (end orthographic context constants)
######################################################################
######################################################################
#{ Decision reasons for debugging
######################################################################
REASON_DEFAULT_DECISION = 'default decision'
REASON_KNOWN_COLLOCATION = 'known collocation (both words)'
REASON_ABBR_WITH_ORTHOGRAPHIC_HEURISTIC = 'abbreviation + orthographic heuristic'
REASON_ABBR_WITH_SENTENCE_STARTER = 'abbreviation + frequent sentence starter'
REASON_INITIAL_WITH_ORTHOGRAPHIC_HEURISTIC = 'initial + orthographic heuristic'
REASON_NUMBER_WITH_ORTHOGRAPHIC_HEURISTIC = 'number + orthographic heuristic'
REASON_INITIAL_WITH_SPECIAL_ORTHOGRAPHIC_HEURISTIC = 'initial + special orthographic heuristic'
#} (end decision reasons for debugging)
######################################################################
######################################################################
#{ Language-dependent variables
######################################################################
class PunktLanguageVars(object):
"""
Stores variables, mostly regular expressions, which may be
language-dependent for correct application of the algorithm.
An extension of this class may modify its properties to suit
a language other than English; an instance can then be passed
as an argument to PunktSentenceTokenizer and PunktTrainer
constructors.
"""
__slots__ = ('_re_period_context', '_re_word_tokenizer')
def __getstate__(self):
# All modifications to the class are performed by inheritance.
# Non-default parameters to be pickled must be defined in the inherited
# class.
return 1
def __setstate__(self, state):
return 1
sent_end_chars = ('.', '?', '!')
"""Characters which are candidates for sentence boundaries"""
@property
def _re_sent_end_chars(self):
return '[%s]' % re.escape(''.join(self.sent_end_chars))
internal_punctuation = ',:;' # might want to extend this..
"""sentence internal punctuation, which indicates an abbreviation if
preceded by a period-final token."""
re_boundary_realignment = re.compile(r'["\')\]}]+?(?:\s+|(?=--)|$)',
re.MULTILINE)
"""Used to realign punctuation that should be included in a sentence
although it follows the period (or ?, !)."""
_re_word_start = r"[^\(\"\`{\[:;&\#\*@\)}\]\-,]"
"""Excludes some characters from starting word tokens"""
_re_non_word_chars = r"(?:[?!)\";}\]\*:@\'\({\[])"
"""Characters that cannot appear within words"""
_re_multi_char_punct = r"(?:\-{2,}|\.{2,}|(?:\.\s){2,}\.)"
"""Hyphen and ellipsis are multi-character punctuation"""
_word_tokenize_fmt = r'''(
%(MultiChar)s
|
(?=%(WordStart)s)\S+? # Accept word characters until end is found
(?= # Sequences marking a word's end
\s| # White-space
$| # End-of-string
%(NonWord)s|%(MultiChar)s| # Punctuation
,(?=$|\s|%(NonWord)s|%(MultiChar)s) # Comma if at end of word
)
|
\S
)'''
"""Format of a regular expression to split punctuation from words,
excluding period."""
def _word_tokenizer_re(self):
"""Compiles and returns a regular expression for word tokenization"""
try:
return self._re_word_tokenizer
except AttributeError:
self._re_word_tokenizer = re.compile(
self._word_tokenize_fmt %
{
'NonWord': self._re_non_word_chars,
'MultiChar': self._re_multi_char_punct,
'WordStart': self._re_word_start,
},
re.UNICODE | re.VERBOSE
)
return self._re_word_tokenizer
def word_tokenize(self, s):
"""Tokenize a string to split off punctuation other than periods"""
return self._word_tokenizer_re().findall(s)
_period_context_fmt = r"""
\S* # some word material
%(SentEndChars)s # a potential sentence ending
(?=(?P<after_tok>
%(NonWord)s # either other punctuation
|
\s+(?P<next_tok>\S+) # or whitespace and some other token
))"""
"""Format of a regular expression to find contexts including possible
sentence boundaries. Matches token which the possible sentence boundary
ends, and matches the following token within a lookahead expression."""
def period_context_re(self):
"""Compiles and returns a regular expression to find contexts
including possible sentence boundaries."""
try:
return self._re_period_context
except:
self._re_period_context = re.compile(
self._period_context_fmt %
{
'NonWord': self._re_non_word_chars,
'SentEndChars': self._re_sent_end_chars,
},
re.UNICODE | re.VERBOSE)
return self._re_period_context
_re_non_punct = re.compile(r'[^\W\d]', re.UNICODE)
"""Matches token types that are not merely punctuation. (Types for
numeric tokens are changed to ##number## and hence contain alpha.)"""
#}
######################################################################
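# A hypothetical customisation of the language-dependent variables above:
# subclass PunktLanguageVars and override only the attributes that differ.
# Here the Devanagari danda (U+0964) is added as a sentence-ending character;
# an instance can then be passed to PunktTrainer / PunktSentenceTokenizer via
# their lang_vars argument.
class _HindiLanguageVars(PunktLanguageVars):
    sent_end_chars = ('.', '?', '!', u'\u0964')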
######################################################################
#{ Punkt Word Tokenizer
######################################################################
class PunktWordTokenizer(TokenizerI):
# Retained for backward compatibility
def __init__(self, lang_vars=PunktLanguageVars()):
self._lang_vars = lang_vars
def tokenize(self, text):
return self._lang_vars.word_tokenize(text)
#}
######################################################################
#////////////////////////////////////////////////////////////
#{ Helper Functions
#////////////////////////////////////////////////////////////
def _pair_iter(it):
"""
Yields pairs of tokens from the given iterator such that each input
token will appear as the first element in a yielded tuple. The last
pair will have None as its second element.
"""
it = iter(it)
prev = it.next()
for el in it:
yield (prev, el)
prev = el
yield (prev, None)
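# Small illustration of the pairing behaviour described above.
def _example_pair_iter():
    return list(_pair_iter([1, 2, 3]))   # [(1, 2), (2, 3), (3, None)]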
######################################################################
#{ Punkt Parameters
######################################################################
class PunktParameters(object):
"""Stores data used to perform sentence boundary detection with Punkt."""
def __init__(self):
self.abbrev_types = set()
"""A set of word types for known abbreviations."""
self.collocations = set()
"""A set of word type tuples for known common collocations
where the first word ends in a period. E.g., ('S.', 'Bach')
is a common collocation in a text that discusses 'Johann
S. Bach'. These count as negative evidence for sentence
boundaries."""
self.sent_starters = set()
"""A set of word types for words that often appear at the
beginning of sentences."""
self.ortho_context = defaultdict(int)
"""A dictionary mapping word types to the set of orthographic
contexts that word type appears in. Contexts are represented
by adding orthographic context flags: ..."""
def clear_abbrevs(self):
self.abbrev_types = set()
def clear_collocations(self):
self.collocations = set()
def clear_sent_starters(self):
self.sent_starters = set()
def clear_ortho_context(self):
self.ortho_context = defaultdict(int)
def add_ortho_context(self, typ, flag):
self.ortho_context[typ] |= flag
def _debug_ortho_context(self, typ):
c = self.ortho_context[typ]
if c & _ORTHO_BEG_UC:
yield 'BEG-UC'
if c & _ORTHO_MID_UC:
yield 'MID-UC'
if c & _ORTHO_UNK_UC:
yield 'UNK-UC'
if c & _ORTHO_BEG_LC:
yield 'BEG-LC'
if c & _ORTHO_MID_LC:
yield 'MID-LC'
if c & _ORTHO_UNK_LC:
yield 'UNK-LC'
######################################################################
#{ PunktToken
######################################################################
class PunktToken(object):
"""Stores a token of text with annotations produced during
sentence boundary detection."""
_properties = [
'parastart', 'linestart',
'sentbreak', 'abbr', 'ellipsis'
]
__slots__ = ['tok', 'type', 'period_final'] + _properties
def __init__(self, tok, **params):
self.tok = tok
self.type = self._get_type(tok)
self.period_final = tok.endswith('.')
for p in self._properties:
setattr(self, p, None)
for k, v in params.iteritems():
setattr(self, k, v)
#////////////////////////////////////////////////////////////
#{ Regular expressions for properties
#////////////////////////////////////////////////////////////
# Note: [A-Za-z] is approximated by [^\W\d] in the general case.
_RE_ELLIPSIS = re.compile(r'\.\.+$')
_RE_NUMERIC = re.compile(r'^-?[\.,]?\d[\d,\.-]*\.?$')
_RE_INITIAL = re.compile(r'[^\W\d]\.$', re.UNICODE)
_RE_ALPHA = re.compile(r'[^\W\d]+$', re.UNICODE)
#////////////////////////////////////////////////////////////
#{ Derived properties
#////////////////////////////////////////////////////////////
def _get_type(self, tok):
"""Returns a case-normalized representation of the token."""
return self._RE_NUMERIC.sub('##number##', tok.lower())
@property
def type_no_period(self):
"""
The type with its final period removed if it has one.
"""
if len(self.type) > 1 and self.type[-1] == '.':
return self.type[:-1]
return self.type
@property
def type_no_sentperiod(self):
"""
The type with its final period removed if it is marked as a
sentence break.
"""
if self.sentbreak:
return self.type_no_period
return self.type
@property
def first_upper(self):
"""True if the token's first character is uppercase."""
return self.tok[0].isupper()
@property
def first_lower(self):
"""True if the token's first character is lowercase."""
return self.tok[0].islower()
@property
def first_case(self):
if self.first_lower:
return 'lower'
elif self.first_upper:
return 'upper'
return 'none'
@property
def is_ellipsis(self):
"""True if the token text is that of an ellipsis."""
return self._RE_ELLIPSIS.match(self.tok)
@property
def is_number(self):
"""True if the token text is that of a number."""
return self.type.startswith('##number##')
@property
def is_initial(self):
"""True if the token text is that of an initial."""
return self._RE_INITIAL.match(self.tok)
@property
def is_alpha(self):
"""True if the token text is all alphabetic."""
return self._RE_ALPHA.match(self.tok)
@property
def is_non_punct(self):
"""True if the token is either a number or is alphabetic."""
return _re_non_punct.search(self.type)
#////////////////////////////////////////////////////////////
#{ String representation
#////////////////////////////////////////////////////////////
def __repr__(self):
"""
A string representation of the token that can reproduce it
with eval(), which lists all the token's non-default
annotations.
"""
typestr = (' type=%s,' % repr(self.type)
if self.type != self.tok else '')
propvals = ', '.join(
'%s=%s' % (p, repr(getattr(self, p)))
for p in self._properties
if getattr(self, p)
)
return '%s(%s,%s %s)' % (self.__class__.__name__,
repr(self.tok), typestr, propvals)
def __str__(self):
"""
A string representation akin to that used by Kiss and Strunk.
"""
res = self.tok
if self.abbr:
res += '<A>'
if self.ellipsis:
res += '<E>'
if self.sentbreak:
res += '<S>'
return res
######################################################################
#{ Punkt base class
######################################################################
class PunktBaseClass(object):
"""
Includes common components of PunktTrainer and PunktSentenceTokenizer.
"""
def __init__(self, lang_vars=PunktLanguageVars(), token_cls=PunktToken,
params=PunktParameters()):
self._params = params
self._lang_vars = lang_vars
self._Token = token_cls
"""The collection of parameters that determines the behavior
of the punkt tokenizer."""
#////////////////////////////////////////////////////////////
#{ Word tokenization
#////////////////////////////////////////////////////////////
def _tokenize_words(self, plaintext):
"""
Divide the given text into tokens, using the punkt word
        segmentation regular expression, and generate the resulting tokens,
        annotated with boolean flags indicating whether each token occurs at
        the start of a paragraph or at the start of a new line, respectively.
"""
parastart = False
for line in plaintext.split('\n'):
if line.strip():
line_toks = iter(self._lang_vars.word_tokenize(line))
yield self._Token(line_toks.next(),
parastart=parastart, linestart=True)
parastart = False
for t in line_toks:
yield self._Token(t)
else:
parastart = True
#////////////////////////////////////////////////////////////
#{ Annotation Procedures
#////////////////////////////////////////////////////////////
def _annotate_first_pass(self, tokens):
"""
Perform the first pass of annotation, which makes decisions
based purely based on the word type of each word:
- '?', '!', and '.' are marked as sentence breaks.
- sequences of two or more periods are marked as ellipsis.
- any word ending in '.' that's a known abbreviation is
marked as an abbreviation.
- any other word ending in '.' is marked as a sentence break.
        The annotated tokens are yielded back one at a time, with the
        ``sentbreak``, ``abbr`` and ``ellipsis`` flags set as appropriate.
for aug_tok in tokens:
self._first_pass_annotation(aug_tok)
yield aug_tok
def _first_pass_annotation(self, aug_tok):
"""
Performs type-based annotation on a single token.
"""
tok = aug_tok.tok
if tok in self._lang_vars.sent_end_chars:
aug_tok.sentbreak = True
elif aug_tok.is_ellipsis:
aug_tok.ellipsis = True
elif aug_tok.period_final and not tok.endswith('..'):
if (tok[:-1].lower() in self._params.abbrev_types or
tok[:-1].lower().split('-')[-1] in self._params.abbrev_types):
aug_tok.abbr = True
else:
aug_tok.sentbreak = True
return
######################################################################
#{ Punkt Trainer
######################################################################
class PunktTrainer(PunktBaseClass):
"""Learns parameters used in Punkt sentence boundary detection."""
def __init__(self, train_text=None, verbose=False,
lang_vars=PunktLanguageVars(), token_cls=PunktToken):
PunktBaseClass.__init__(self, lang_vars=lang_vars,
token_cls=token_cls)
self._type_fdist = FreqDist()
"""A frequency distribution giving the frequency of each
case-normalized token type in the training data."""
self._num_period_toks = 0
"""The number of words ending in period in the training data."""
self._collocation_fdist = FreqDist()
"""A frequency distribution giving the frequency of all
bigrams in the training data where the first word ends in a
period. Bigrams are encoded as tuples of word types.
Especially common collocations are extracted from this
frequency distribution, and stored in
``_params``.``collocations <PunktParameters.collocations>``."""
self._sent_starter_fdist = FreqDist()
"""A frequency distribution giving the frequency of all words
        that occur in the training data at the beginning of a sentence
(after the first pass of annotation). Especially common
sentence starters are extracted from this frequency
distribution, and stored in ``_params.sent_starters``.
"""
self._sentbreak_count = 0
"""The total number of sentence breaks identified in training, used for
calculating the frequent sentence starter heuristic."""
self._finalized = True
"""A flag as to whether the training has been finalized by finding
collocations and sentence starters, or whether finalize_training()
still needs to be called."""
if train_text:
self.train(train_text, verbose, finalize=True)
def get_params(self):
"""
Calculates and returns parameters for sentence boundary detection as
derived from training."""
if not self._finalized:
self.finalize_training()
return self._params
#////////////////////////////////////////////////////////////
#{ Customization Variables
#////////////////////////////////////////////////////////////
ABBREV = 0.3
"""cut-off value whether a 'token' is an abbreviation"""
IGNORE_ABBREV_PENALTY = False
"""allows the disabling of the abbreviation penalty heuristic, which
exponentially disadvantages words that are found at times without a
final period."""
ABBREV_BACKOFF = 5
"""upper cut-off for Mikheev's(2002) abbreviation detection algorithm"""
COLLOCATION = 7.88
"""minimal log-likelihood value that two tokens need to be considered
as a collocation"""
SENT_STARTER = 30
"""minimal log-likelihood value that a token requires to be considered
as a frequent sentence starter"""
INCLUDE_ALL_COLLOCS = False
"""this includes as potential collocations all word pairs where the first
word ends in a period. It may be useful in corpora where there is a lot
of variation that makes abbreviations like Mr difficult to identify."""
INCLUDE_ABBREV_COLLOCS = False
"""this includes as potential collocations all word pairs where the first
word is an abbreviation. Such collocations override the orthographic
heuristic, but not the sentence starter heuristic. This is overridden by
INCLUDE_ALL_COLLOCS, and if both are false, only collocations with initials
and ordinals are considered."""
""""""
MIN_COLLOC_FREQ = 1
"""this sets a minimum bound on the number of times a bigram needs to
appear before it can be considered a collocation, in addition to log
likelihood statistics. This is useful when INCLUDE_ALL_COLLOCS is True."""
#////////////////////////////////////////////////////////////
    #{ Training
#////////////////////////////////////////////////////////////
def train(self, text, verbose=False, finalize=True):
"""
Collects training data from a given text. If finalize is True, it
will determine all the parameters for sentence boundary detection. If
not, this will be delayed until get_params() or finalize_training() is
called. If verbose is True, abbreviations found will be listed.
"""
# Break the text into tokens; record which token indices correspond to
# line starts and paragraph starts; and determine their types.
self._train_tokens(self._tokenize_words(text), verbose)
if finalize:
self.finalize_training(verbose)
def train_tokens(self, tokens, verbose=False, finalize=True):
"""
Collects training data from a given list of tokens.
"""
self._train_tokens((self._Token(t) for t in tokens), verbose)
if finalize:
self.finalize_training(verbose)
def _train_tokens(self, tokens, verbose):
self._finalized = False
# Ensure tokens are a list
tokens = list(tokens)
# Find the frequency of each case-normalized type. (Don't
# strip off final periods.) Also keep track of the number of
# tokens that end in periods.
for aug_tok in tokens:
self._type_fdist.inc(aug_tok.type)
if aug_tok.period_final:
self._num_period_toks += 1
# Look for new abbreviations, and for types that no longer are
unique_types = self._unique_types(tokens)
for abbr, score, is_add in self._reclassify_abbrev_types(unique_types):
if score >= self.ABBREV:
if is_add:
self._params.abbrev_types.add(abbr)
if verbose:
print((' Abbreviation: [%6.4f] %s' %
(score, abbr)))
else:
if not is_add:
self._params.abbrev_types.remove(abbr)
if verbose:
print((' Removed abbreviation: [%6.4f] %s' %
(score, abbr)))
# Make a preliminary pass through the document, marking likely
# sentence breaks, abbreviations, and ellipsis tokens.
tokens = list(self._annotate_first_pass(tokens))
# Check what contexts each word type can appear in, given the
# case of its first letter.
self._get_orthography_data(tokens)
# We need total number of sentence breaks to find sentence starters
self._sentbreak_count += self._get_sentbreak_count(tokens)
# The remaining heuristics relate to pairs of tokens where the first
# ends in a period.
for aug_tok1, aug_tok2 in _pair_iter(tokens):
if not aug_tok1.period_final or not aug_tok2:
continue
# Is the first token a rare abbreviation?
if self._is_rare_abbrev_type(aug_tok1, aug_tok2):
self._params.abbrev_types.add(aug_tok1.type_no_period)
if verbose:
print((' Rare Abbrev: %s' % aug_tok1.type))
# Does second token have a high likelihood of starting a sentence?
if self._is_potential_sent_starter(aug_tok2, aug_tok1):
self._sent_starter_fdist.inc(aug_tok2.type)
# Is this bigram a potential collocation?
if self._is_potential_collocation(aug_tok1, aug_tok2):
self._collocation_fdist.inc(
(aug_tok1.type_no_period, aug_tok2.type_no_sentperiod))
def _unique_types(self, tokens):
return set(aug_tok.type for aug_tok in tokens)
def finalize_training(self, verbose=False):
"""
Uses data that has been gathered in training to determine likely
collocations and sentence starters.
"""
self._params.clear_sent_starters()
for typ, ll in self._find_sent_starters():
self._params.sent_starters.add(typ)
if verbose:
print((' Sent Starter: [%6.4f] %r' % (ll, typ)))
self._params.clear_collocations()
for (typ1, typ2), ll in self._find_collocations():
self._params.collocations.add( (typ1,typ2) )
if verbose:
print((' Collocation: [%6.4f] %r+%r' %
(ll, typ1, typ2)))
self._finalized = True
#////////////////////////////////////////////////////////////
#{ Overhead reduction
#////////////////////////////////////////////////////////////
def freq_threshold(self, ortho_thresh=2, type_thresh=2, colloc_thres=2,
sentstart_thresh=2):
"""
Allows memory use to be reduced after much training by removing data
about rare tokens that are unlikely to have a statistical effect with
further training. Entries occurring above the given thresholds will be
retained.
"""
if ortho_thresh > 1:
old_oc = self._params.ortho_context
self._params.clear_ortho_context()
for tok, count in self._type_fdist.iteritems():
if count >= ortho_thresh:
self._params.ortho_context[tok] = old_oc[tok]
self._type_fdist = self._freq_threshold(self._type_fdist, type_thresh)
self._collocation_fdist = self._freq_threshold(
self._collocation_fdist, colloc_thres)
self._sent_starter_fdist = self._freq_threshold(
self._sent_starter_fdist, sentstart_thresh)
def _freq_threshold(self, fdist, threshold):
"""
        Returns a FreqDist containing only the data with counts at or above
        the given threshold, as well as a mapping (None -> number of removed
        entries).
"""
# We assume that there is more data below the threshold than above it
# and so create a new FreqDist rather than working in place.
res = FreqDist()
num_removed = 0
for tok, count in fdist.iteritems():
if count < threshold:
num_removed += 1
else:
res.inc(tok, count)
res.inc(None, num_removed)
return res
#////////////////////////////////////////////////////////////
#{ Orthographic data
#////////////////////////////////////////////////////////////
def _get_orthography_data(self, tokens):
"""
Collect information about whether each token type occurs
with different case patterns (i) overall, (ii) at
sentence-initial positions, and (iii) at sentence-internal
positions.
"""
# 'initial' or 'internal' or 'unknown'
context = 'internal'
tokens = list(tokens)
for aug_tok in tokens:
# If we encounter a paragraph break, then it's a good sign
# that it's a sentence break. But err on the side of
# caution (by not positing a sentence break) if we just
# saw an abbreviation.
if aug_tok.parastart and context != 'unknown':
context = 'initial'
            # If we're at the beginning of a line, we can't tell whether the
            # context is sentence-initial or sentence-internal, so err on the
            # side of calling it 'unknown'.
if aug_tok.linestart and context == 'internal':
context = 'unknown'
# Find the case-normalized type of the token. If it's a
# sentence-final token, strip off the period.
typ = aug_tok.type_no_sentperiod
# Update the orthographic context table.
flag = _ORTHO_MAP.get((context, aug_tok.first_case), 0)
if flag:
self._params.add_ortho_context(typ, flag)
# Decide whether the next word is at a sentence boundary.
if aug_tok.sentbreak:
if not (aug_tok.is_number or aug_tok.is_initial):
context = 'initial'
else:
context = 'unknown'
elif aug_tok.ellipsis or aug_tok.abbr:
context = 'unknown'
else:
context = 'internal'
#////////////////////////////////////////////////////////////
#{ Abbreviations
#////////////////////////////////////////////////////////////
def _reclassify_abbrev_types(self, types):
"""
(Re)classifies each given token if
- it is period-final and not a known abbreviation; or
- it is not period-final and is otherwise a known abbreviation
by checking whether its previous classification still holds according
to the heuristics of section 3.
Yields triples (abbr, score, is_add) where abbr is the type in question,
score is its log-likelihood with penalties applied, and is_add specifies
whether the present type is a candidate for inclusion or exclusion as an
abbreviation, such that:
- (is_add and score >= 0.3) suggests a new abbreviation; and
- (not is_add and score < 0.3) suggests excluding an abbreviation.
"""
        # (One could recalculate abbreviations from all period-final tokens on
        # every iteration, but when efficiency matters it is enough to look at
        # the types seen in the current training document, which is typically a
        # much smaller set.)
for typ in types:
# Check some basic conditions, to rule out words that are
# clearly not abbrev_types.
if not _re_non_punct.search(typ) or typ == '##number##':
continue
if typ.endswith('.'):
if typ in self._params.abbrev_types:
continue
typ = typ[:-1]
is_add = True
else:
if typ not in self._params.abbrev_types:
continue
is_add = False
# Count how many periods & nonperiods are in the
# candidate.
num_periods = typ.count('.') + 1
num_nonperiods = len(typ) - num_periods + 1
# Let <a> be the candidate without the period, and <b>
# be the period. Find a log likelihood ratio that
# indicates whether <ab> occurs as a single unit (high
# value of ll), or as two independent units <a> and
# <b> (low value of ll).
count_with_period = self._type_fdist[typ + '.']
count_without_period = self._type_fdist[typ]
ll = self._dunning_log_likelihood(
count_with_period + count_without_period,
self._num_period_toks, count_with_period,
self._type_fdist.N())
# Apply three scaling factors to 'tweak' the basic log
# likelihood ratio:
# F_length: long word -> less likely to be an abbrev
# F_periods: more periods -> more likely to be an abbrev
# F_penalty: penalize occurrences w/o a period
f_length = math.exp(-num_nonperiods)
f_periods = num_periods
f_penalty = (int(self.IGNORE_ABBREV_PENALTY)
or math.pow(num_nonperiods, -count_without_period))
score = ll * f_length * f_periods * f_penalty
yield typ, score, is_add
def find_abbrev_types(self):
"""
        Recalculates abbreviations from type frequencies alone, without
        requiring any prior determination of abbreviations.
        Note that this does not pick up abbreviations that would only be found
        by the rare-abbreviation heuristic.
"""
self._params.clear_abbrevs()
tokens = (typ for typ in self._type_fdist if typ and typ.endswith('.'))
for abbr, score, is_add in self._reclassify_abbrev_types(tokens):
if score >= self.ABBREV:
self._params.abbrev_types.add(abbr)
# This function combines the work done by the original code's
# functions `count_orthography_context`, `get_orthography_count`,
# and `get_rare_abbreviations`.
def _is_rare_abbrev_type(self, cur_tok, next_tok):
"""
A word type is counted as a rare abbreviation if...
- it's not already marked as an abbreviation
- it occurs fewer than ABBREV_BACKOFF times
- either it is followed by a sentence-internal punctuation
mark, *or* it is followed by a lower-case word that
sometimes appears with upper case, but never occurs with
lower case at the beginning of sentences.
"""
if cur_tok.abbr or not cur_tok.sentbreak:
return False
# Find the case-normalized type of the token. If it's
# a sentence-final token, strip off the period.
typ = cur_tok.type_no_sentperiod
# Proceed only if the type hasn't been categorized as an
# abbreviation already, and is sufficiently rare...
count = self._type_fdist[typ] + self._type_fdist[typ[:-1]]
if (typ in self._params.abbrev_types or count >= self.ABBREV_BACKOFF):
return False
# Record this token as an abbreviation if the next
# token is a sentence-internal punctuation mark.
# [XX] :1 or check the whole thing??
if next_tok.tok[:1] in self._lang_vars.internal_punctuation:
return True
# Record this type as an abbreviation if the next
# token... (i) starts with a lower case letter,
# (ii) sometimes occurs with an uppercase letter,
        # and (iii) never occurs with an uppercase letter
# sentence-internally.
# [xx] should the check for (ii) be modified??
elif next_tok.first_lower:
typ2 = next_tok.type_no_sentperiod
typ2ortho_context = self._params.ortho_context[typ2]
if ( (typ2ortho_context & _ORTHO_BEG_UC) and
not (typ2ortho_context & _ORTHO_MID_UC) ):
return True
#////////////////////////////////////////////////////////////
#{ Log Likelihoods
#////////////////////////////////////////////////////////////
# helper for _reclassify_abbrev_types:
@staticmethod
def _dunning_log_likelihood(count_a, count_b, count_ab, N):
"""
A function that calculates the modified Dunning log-likelihood
ratio scores for abbreviation candidates. The details of how
        this works are available in the paper.
"""
p1 = float(count_b) / N
p2 = 0.99
null_hypo = (float(count_ab) * math.log(p1) +
(count_a - count_ab) * math.log(1.0 - p1))
alt_hypo = (float(count_ab) * math.log(p2) +
(count_a - count_ab) * math.log(1.0 - p2))
likelihood = null_hypo - alt_hypo
return (-2.0 * likelihood)
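    # Editor's note (illustrative, not in the original source): the ratio above can
    # be probed directly, mirroring the call made in _reclassify_abbrev_types(), e.g.
    #
    #     PunktTrainer._dunning_log_likelihood(count_a=20, count_b=500,
    #                                          count_ab=18, N=10000)
    #
    # where count_a counts the candidate type with and without its period, count_b
    # counts all period-final tokens, count_ab counts the candidate with the period,
    # and N is the total token count.  Larger scores indicate that the period is
    # more tightly bound to the candidate type.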
@staticmethod
def _col_log_likelihood(count_a, count_b, count_ab, N):
"""
        Computes a log-likelihood estimate as described in algorithms 6 and 7
        of the original paper.  This should be the unmodified Dunning
        log-likelihood, unlike ``_dunning_log_likelihood`` above, which applies
        the modified form.
"""
import math
p = 1.0 * count_b / N
p1 = 1.0 * count_ab / count_a
p2 = 1.0 * (count_b - count_ab) / (N - count_a)
summand1 = (count_ab * math.log(p) +
(count_a - count_ab) * math.log(1.0 - p))
summand2 = ((count_b - count_ab) * math.log(p) +
(N - count_a - count_b + count_ab) * math.log(1.0 - p))
if count_a == count_ab:
summand3 = 0
else:
summand3 = (count_ab * math.log(p1) +
(count_a - count_ab) * math.log(1.0 - p1))
if count_b == count_ab:
summand4 = 0
else:
summand4 = ((count_b - count_ab) * math.log(p2) +
(N - count_a - count_b + count_ab) * math.log(1.0 - p2))
likelihood = summand1 + summand2 - summand3 - summand4
return (-2.0 * likelihood)
#////////////////////////////////////////////////////////////
#{ Collocation Finder
#////////////////////////////////////////////////////////////
def _is_potential_collocation(self, aug_tok1, aug_tok2):
"""
Returns True if the pair of tokens may form a collocation given
log-likelihood statistics.
"""
return ((self.INCLUDE_ALL_COLLOCS or
(self.INCLUDE_ABBREV_COLLOCS and aug_tok1.abbr) or
(aug_tok1.sentbreak and
(aug_tok1.is_number or aug_tok1.is_initial)))
and aug_tok1.is_non_punct
and aug_tok2.is_non_punct)
def _find_collocations(self):
"""
Generates likely collocations and their log-likelihood.
"""
for types, col_count in self._collocation_fdist.iteritems():
try:
typ1, typ2 = types
except TypeError:
# types may be None after calling freq_threshold()
continue
if typ2 in self._params.sent_starters:
continue
typ1_count = self._type_fdist[typ1]+self._type_fdist[typ1+'.']
typ2_count = self._type_fdist[typ2]+self._type_fdist[typ2+'.']
if (typ1_count > 1 and typ2_count > 1
and self.MIN_COLLOC_FREQ <
col_count <= min(typ1_count, typ2_count)):
ll = self._col_log_likelihood(typ1_count, typ2_count,
col_count, self._type_fdist.N())
# Filter out the not-so-collocative
if (ll >= self.COLLOCATION and
(float(self._type_fdist.N())/typ1_count >
float(typ2_count)/col_count)):
yield (typ1, typ2), ll
#////////////////////////////////////////////////////////////
#{ Sentence-Starter Finder
#////////////////////////////////////////////////////////////
def _is_potential_sent_starter(self, cur_tok, prev_tok):
"""
        Returns True given a token and the token that precedes it if it
seems clear that the token is beginning a sentence.
"""
        # If a token (i) is preceded by a sentence break that is
        # not a potential ordinal number or initial, and (ii) is
        # alphabetic, then it is a sentence-starter.
return ( prev_tok.sentbreak and
not (prev_tok.is_number or prev_tok.is_initial) and
cur_tok.is_alpha )
def _find_sent_starters(self):
"""
Uses collocation heuristics for each candidate token to
determine if it frequently starts sentences.
"""
for (typ, typ_at_break_count) in self._sent_starter_fdist.iteritems():
if not typ:
continue
typ_count = self._type_fdist[typ]+self._type_fdist[typ+'.']
if typ_count < typ_at_break_count:
# needed after freq_threshold
continue
ll = self._col_log_likelihood(self._sentbreak_count, typ_count,
typ_at_break_count,
self._type_fdist.N())
if (ll >= self.SENT_STARTER and
float(self._type_fdist.N())/self._sentbreak_count >
float(typ_count)/typ_at_break_count):
yield typ, ll
def _get_sentbreak_count(self, tokens):
"""
Returns the number of sentence breaks marked in a given set of
augmented tokens.
"""
return sum(1 for aug_tok in tokens if aug_tok.sentbreak)
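# Editor's note: a minimal incremental-training sketch, not part of the original
# module; the placeholder strings stand in for arbitrary chunks of raw text.
def _sketch_incremental_training(chunks=("First chunk of raw text.",
                                         "Second chunk of raw text.")):
    trainer = PunktTrainer()
    for chunk in chunks:
        # Defer the collocation / sentence-starter search until every chunk has
        # been seen; get_params() runs finalize_training() once at the end.
        trainer.train(chunk, finalize=False)
    return trainer.get_params()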
######################################################################
#{ Punkt Sentence Tokenizer
######################################################################
class PunktSentenceTokenizer(PunktBaseClass,TokenizerI):
"""
A sentence tokenizer which uses an unsupervised algorithm to build
a model for abbreviation words, collocations, and words that start
sentences; and then uses that model to find sentence boundaries.
This approach has been shown to work well for many European
languages.
"""
def __init__(self, train_text=None, verbose=False,
lang_vars=PunktLanguageVars(), token_cls=PunktToken):
"""
train_text can either be the sole training text for this sentence
boundary detector, or can be a PunktParameters object.
"""
PunktBaseClass.__init__(self, lang_vars=lang_vars,
token_cls=token_cls)
if train_text:
self._params = self.train(train_text, verbose)
def train(self, train_text, verbose=False):
"""
Derives parameters from a given training text, or uses the parameters
given. Repeated calls to this method destroy previous parameters. For
incremental training, instantiate a separate PunktTrainer instance.
"""
if type(train_text) not in (type(''), type(u'')):
return train_text
return PunktTrainer(train_text, lang_vars=self._lang_vars,
token_cls=self._Token).get_params()
#////////////////////////////////////////////////////////////
#{ Tokenization
#////////////////////////////////////////////////////////////
def tokenize(self, text, realign_boundaries=False):
"""
Given a text, returns a list of the sentences in that text.
"""
return list(self.sentences_from_text(text, realign_boundaries))
def debug_decisions(self, text):
"""
Classifies candidate periods as sentence breaks, yielding a dict for
each that may be used to understand why the decision was made.
See format_debug_decision() to help make this output readable.
"""
for match in self._lang_vars.period_context_re().finditer(text):
decision_text = match.group() + match.group('after_tok')
tokens = self._tokenize_words(decision_text)
tokens = list(self._annotate_first_pass(tokens))
while not tokens[0].period_final:
tokens.pop(0)
yield dict(period_index=match.end() - 1,
text=decision_text,
type1=tokens[0].type,
type2=tokens[1].type,
type1_in_abbrs=bool(tokens[0].abbr),
type1_is_initial=bool(tokens[0].is_initial),
type2_is_sent_starter=tokens[1].type_no_sentperiod in self._params.sent_starters,
type2_ortho_heuristic=self._ortho_heuristic(tokens[1]),
type2_ortho_contexts=set(self._params._debug_ortho_context(tokens[1].type_no_sentperiod)),
collocation=(tokens[0].type_no_sentperiod, tokens[1].type_no_sentperiod) in self._params.collocations,
reason=self._second_pass_annotation(tokens[0], tokens[1]) or REASON_DEFAULT_DECISION,
break_decision=tokens[0].sentbreak,
)
def span_tokenize(self, text):
"""
Given a text, returns a list of the (start, end) spans of sentences
in the text.
"""
return [(sl.start, sl.stop) for sl in self._slices_from_text(text)]
def sentences_from_text(self, text, realign_boundaries=False):
"""
Given a text, generates the sentences in that text by only
testing candidate sentence breaks. If realign_boundaries is
True, includes in the sentence closing punctuation that
follows the period.
"""
sents = [text[sl] for sl in self._slices_from_text(text)]
if realign_boundaries:
sents = self._realign_boundaries(sents)
return sents
def _slices_from_text(self, text):
last_break = 0
for match in self._lang_vars.period_context_re().finditer(text):
context = match.group() + match.group('after_tok')
if self.text_contains_sentbreak(context):
yield slice(last_break, match.end())
if match.group('next_tok'):
# next sentence starts after whitespace
last_break = match.start('next_tok')
else:
# next sentence starts at following punctuation
last_break = match.end()
yield slice(last_break, len(text))
def _realign_boundaries(self, sents):
"""
Attempts to realign punctuation that falls after the period but
should otherwise be included in the same sentence.
For example: "(Sent1.) Sent2." will otherwise be split as::
["(Sent1.", ") Sent1."].
This method will produce::
["(Sent1.)", "Sent2."].
"""
realign = 0
for s1, s2 in _pair_iter(sents):
s1 = s1[realign:]
if not s2:
if s1:
yield s1
continue
m = self._lang_vars.re_boundary_realignment.match(s2)
if m:
yield s1 + m.group(0).strip()
realign = m.end()
else:
realign = 0
if s1:
yield s1
def text_contains_sentbreak(self, text):
"""
Returns True if the given text includes a sentence break.
"""
found = False # used to ignore last token
for t in self._annotate_tokens(self._tokenize_words(text)):
if found:
return True
if t.sentbreak:
found = True
return False
def sentences_from_text_legacy(self, text):
"""
Given a text, generates the sentences in that text. Annotates all
tokens, rather than just those with possible sentence breaks. Should
produce the same results as ``sentences_from_text``.
"""
tokens = self._annotate_tokens(self._tokenize_words(text))
return self._build_sentence_list(text, tokens)
def sentences_from_tokens(self, tokens):
"""
Given a sequence of tokens, generates lists of tokens, each list
corresponding to a sentence.
"""
tokens = iter(self._annotate_tokens(self._Token(t) for t in tokens))
sentence = []
for aug_tok in tokens:
sentence.append(aug_tok.tok)
if aug_tok.sentbreak:
yield sentence
sentence = []
if sentence:
yield sentence
def _annotate_tokens(self, tokens):
"""
Given a set of tokens augmented with markers for line-start and
paragraph-start, returns an iterator through those tokens with full
annotation including predicted sentence breaks.
"""
# Make a preliminary pass through the document, marking likely
# sentence breaks, abbreviations, and ellipsis tokens.
tokens = self._annotate_first_pass(tokens)
# Make a second pass through the document, using token context
# information to change our preliminary decisions about where
# sentence breaks, abbreviations, and ellipsis occurs.
tokens = self._annotate_second_pass(tokens)
## [XX] TESTING
#tokens = list(tokens)
#self.dump(tokens)
return tokens
def _build_sentence_list(self, text, tokens):
"""
Given the original text and the list of augmented word tokens,
construct and return a tokenized list of sentence strings.
"""
# Most of the work here is making sure that we put the right
# pieces of whitespace back in all the right places.
# Our position in the source text, used to keep track of which
# whitespace to add:
pos = 0
# A regular expression that finds pieces of whitespace:
WS_REGEXP = re.compile(r'\s*')
sentence = ''
for aug_tok in tokens:
tok = aug_tok.tok
# Find the whitespace before this token, and update pos.
ws = WS_REGEXP.match(text, pos).group()
pos += len(ws)
# Some of the rules used by the punkt word tokenizer
# strip whitespace out of the text, resulting in tokens
# that contain whitespace in the source text. If our
# token doesn't match, see if adding whitespace helps.
# If so, then use the version with whitespace.
if text[pos:pos+len(tok)] != tok:
                pat = r'\s*'.join(re.escape(c) for c in tok)
                m = re.compile(pat).match(text, pos)
if m: tok = m.group()
# Move our position pointer to the end of the token.
assert text[pos:pos+len(tok)] == tok
pos += len(tok)
# Add this token. If it's not at the beginning of the
# sentence, then include any whitespace that separated it
# from the previous token.
if sentence:
sentence += ws
sentence += tok
# If we're at a sentence break, then start a new sentence.
if aug_tok.sentbreak:
yield sentence
sentence = ''
        # If the last sentence is empty, discard it.
if sentence:
yield sentence
# [XX] TESTING
def dump(self, tokens):
print('writing to /tmp/punkt.new...')
out = open('/tmp/punkt.new', 'w')
for aug_tok in tokens:
if aug_tok.parastart:
out.write('\n\n')
elif aug_tok.linestart:
out.write('\n')
else:
out.write(' ')
out.write(str(aug_tok))
out.close()
#////////////////////////////////////////////////////////////
#{ Customization Variables
#////////////////////////////////////////////////////////////
PUNCTUATION = tuple(';:,.!?')
#////////////////////////////////////////////////////////////
#{ Annotation Procedures
#////////////////////////////////////////////////////////////
def _annotate_second_pass(self, tokens):
"""
Performs a token-based classification (section 4) over the given
tokens, making use of the orthographic heuristic (4.1.1), collocation
heuristic (4.1.2) and frequent sentence starter heuristic (4.1.3).
"""
for t1, t2 in _pair_iter(tokens):
self._second_pass_annotation(t1, t2)
yield t1
def _second_pass_annotation(self, aug_tok1, aug_tok2):
"""
Performs token-based classification over a pair of contiguous tokens
updating the first.
"""
# Is it the last token? We can't do anything then.
if not aug_tok2:
return
tok = aug_tok1.tok
if not aug_tok1.period_final:
# We only care about words ending in periods.
return
typ = aug_tok1.type_no_period
next_tok = aug_tok2.tok
next_typ = aug_tok2.type_no_sentperiod
tok_is_initial = aug_tok1.is_initial
# [4.1.2. Collocation Heuristic] If there's a
# collocation between the word before and after the
# period, then label tok as an abbreviation and NOT
# a sentence break. Note that collocations with
# frequent sentence starters as their second word are
# excluded in training.
if (typ, next_typ) in self._params.collocations:
aug_tok1.sentbreak = False
aug_tok1.abbr = True
return REASON_KNOWN_COLLOCATION
# [4.2. Token-Based Reclassification of Abbreviations] If
# the token is an abbreviation or an ellipsis, then decide
# whether we should *also* classify it as a sentbreak.
if ( (aug_tok1.abbr or aug_tok1.ellipsis) and
(not tok_is_initial) ):
# [4.1.1. Orthographic Heuristic] Check if there's
            # orthographic evidence about whether the next word
# starts a sentence or not.
is_sent_starter = self._ortho_heuristic(aug_tok2)
if is_sent_starter == True:
aug_tok1.sentbreak = True
return REASON_ABBR_WITH_ORTHOGRAPHIC_HEURISTIC
            # [4.1.3. Frequent Sentence Starter Heuristic] If the
# next word is capitalized, and is a member of the
# frequent-sentence-starters list, then label tok as a
# sentence break.
if ( aug_tok2.first_upper and
next_typ in self._params.sent_starters):
aug_tok1.sentbreak = True
return REASON_ABBR_WITH_SENTENCE_STARTER
# [4.3. Token-Based Detection of Initials and Ordinals]
# Check if any initials or ordinals tokens that are marked
# as sentbreaks should be reclassified as abbreviations.
if tok_is_initial or typ == '##number##':
# [4.1.1. Orthographic Heuristic] Check if there's
            # orthographic evidence about whether the next word
# starts a sentence or not.
is_sent_starter = self._ortho_heuristic(aug_tok2)
if is_sent_starter == False:
aug_tok1.sentbreak = False
aug_tok1.abbr = True
if tok_is_initial:
return REASON_INITIAL_WITH_ORTHOGRAPHIC_HEURISTIC
else:
return REASON_NUMBER_WITH_ORTHOGRAPHIC_HEURISTIC
            # Special heuristic for initials: if orthographic
            # heuristic is unknown, and next word is always
# capitalized, then mark as abbrev (eg: J. Bach).
if ( is_sent_starter == 'unknown' and tok_is_initial and
aug_tok2.first_upper and
not (self._params.ortho_context[next_typ] & _ORTHO_LC) ):
aug_tok1.sentbreak = False
aug_tok1.abbr = True
return REASON_INITIAL_WITH_SPECIAL_ORTHOGRAPHIC_HEURISTIC
return
def _ortho_heuristic(self, aug_tok):
"""
Decide whether the given token is the first token in a sentence.
"""
# Sentences don't start with punctuation marks:
if aug_tok.tok in self.PUNCTUATION:
return False
ortho_context = self._params.ortho_context[aug_tok.type_no_sentperiod]
# If the word is capitalized, occurs at least once with a
# lower case first letter, and never occurs with an upper case
# first letter sentence-internally, then it's a sentence starter.
if ( aug_tok.first_upper and
(ortho_context & _ORTHO_LC) and
not (ortho_context & _ORTHO_MID_UC) ):
return True
# If the word is lower case, and either (a) we've seen it used
# with upper case, or (b) we've never seen it used
# sentence-initially with lower case, then it's not a sentence
# starter.
if ( aug_tok.first_lower and
((ortho_context & _ORTHO_UC) or
not (ortho_context & _ORTHO_BEG_LC)) ):
return False
# Otherwise, we're not sure.
return 'unknown'
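    # Editor's note (illustrative only): _second_pass_annotation() above consumes
    # the three possible answers differently -- True lets an abbreviation or
    # ellipsis still end a sentence, False lets an initial or ordinal that was
    # marked as a sentence break be reclassified as an abbreviation, and 'unknown'
    # falls back to the frequent-sentence-starter and capitalization checks.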
DEBUG_DECISION_FMT = '''Text: %(text)r (at offset %(period_index)d)
Sentence break? %(break_decision)s (%(reason)s)
Collocation? %(collocation)s
%(type1)r:
known abbreviation: %(type1_in_abbrs)s
is initial: %(type1_is_initial)s
%(type2)r:
known sentence starter: %(type2_is_sent_starter)s
orthographic heuristic suggests is a sentence starter? %(type2_ortho_heuristic)s
orthographic contexts in training: %(type2_ortho_contexts)s
'''
def format_debug_decision(d):
return DEBUG_DECISION_FMT % d
def demo(text, tok_cls=PunktSentenceTokenizer, train_cls=PunktTrainer):
"""Builds a punkt model and applies it to the same text"""
cleanup = lambda s: re.compile(r'(?:\r|^\s+)', re.MULTILINE).sub('', s).replace('\n', ' ')
trainer = train_cls()
trainer.INCLUDE_ALL_COLLOCS = True
trainer.train(text)
sbd = tok_cls(trainer.get_params())
for l in sbd.sentences_from_text(text, realign_boundaries=True):
print(cleanup(l))
if __name__ == "__main__":
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
| {
"content_hash": "b12a784b498de04c05460bcd53eb5d33",
"timestamp": "",
"source": "github",
"line_count": 1584,
"max_line_length": 118,
"avg_line_length": 38.53977272727273,
"alnum_prop": 0.5558176486969056,
"repo_name": "sivu22/nltk-on-gae",
"id": "fbb1cb3612da5aeb71d64854e84255103f4e3700",
"size": "61512",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "GAE/nltk/tokenize/punkt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "178"
},
{
"name": "Python",
"bytes": "3591141"
}
],
"symlink_target": ""
} |
import socket
import os
import select
import sys
HOSTNAME = 'localhost'
PORT = 5000
def prompt():
sys.stdout.write("YOU > ")
sys.stdout.flush()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
try:
s.connect((HOSTNAME, PORT))
except Exception:
    print("ERROR > Unable to connect")
sys.exit()
prompt()
while 1:
socket_list = [sys.stdin, s]
    # Get the list of sockets which are readable
read_sockets, write_sockets, error_sockets = select.select(socket_list , [], [])
for sock in read_sockets:
#incoming message from remote server
if sock == s:
data = sock.recv(4096)
            if not data:
                print('\nDisconnected from chat server')
                sys.exit()
else :
#print data
sys.stdout.write("\n")
sys.stdout.write(data)
prompt()
#user entered a message
else :
msg = sys.stdin.readline()
s.send(msg)
prompt()
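# Editor's note (not part of the original script): the client assumes a TCP chat
# server is already listening on HOSTNAME:PORT and simply relays lines between
# stdin and the socket, e.g.
#
#     python chat_client.py
#
# Because select.select() is applied to sys.stdin, this only works on Unix-like
# systems, and the script as a whole targets Python 2.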
| {
"content_hash": "c4372e03b8ae83cd7feb3b5457c8b020",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 81,
"avg_line_length": 18.3125,
"alnum_prop": 0.6496018202502845,
"repo_name": "rusucosmin/courses",
"id": "a0a4aebbcc96f31514ffc9681eceb646af3f8ed8",
"size": "879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ubb/networking/lab5/chat_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "66934"
},
{
"name": "Awk",
"bytes": "390"
},
{
"name": "C",
"bytes": "506519"
},
{
"name": "C#",
"bytes": "18080"
},
{
"name": "C++",
"bytes": "300998"
},
{
"name": "CMake",
"bytes": "1294"
},
{
"name": "CSS",
"bytes": "34492"
},
{
"name": "Common Lisp",
"bytes": "24300"
},
{
"name": "HTML",
"bytes": "1125773"
},
{
"name": "Hack",
"bytes": "1121"
},
{
"name": "Java",
"bytes": "158144"
},
{
"name": "JavaScript",
"bytes": "35305"
},
{
"name": "Jupyter Notebook",
"bytes": "20740224"
},
{
"name": "Lex",
"bytes": "3733"
},
{
"name": "M",
"bytes": "1745"
},
{
"name": "MATLAB",
"bytes": "26207"
},
{
"name": "Makefile",
"bytes": "398"
},
{
"name": "NewLisp",
"bytes": "197"
},
{
"name": "PHP",
"bytes": "56891"
},
{
"name": "Pascal",
"bytes": "672"
},
{
"name": "Prolog",
"bytes": "25141"
},
{
"name": "Python",
"bytes": "440544"
},
{
"name": "R",
"bytes": "3431"
},
{
"name": "Roff",
"bytes": "196"
},
{
"name": "Ruby",
"bytes": "27888"
},
{
"name": "Scala",
"bytes": "620018"
},
{
"name": "Shell",
"bytes": "25651"
},
{
"name": "TeX",
"bytes": "22510"
},
{
"name": "TypeScript",
"bytes": "14752"
},
{
"name": "XSLT",
"bytes": "1489"
},
{
"name": "Yacc",
"bytes": "14087"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from typing import Dict, Type
from .base import BiddingStrategyServiceTransport
from .grpc import BiddingStrategyServiceGrpcTransport
# Compile a registry of transports.
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[BiddingStrategyServiceTransport]]
_transport_registry["grpc"] = BiddingStrategyServiceGrpcTransport
__all__ = (
"BiddingStrategyServiceTransport",
"BiddingStrategyServiceGrpcTransport",
)
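# Editor's note: illustrative sketch only.  The registry above lets callers resolve
# a transport class by name, e.g.
#
#     transport_cls = _transport_registry["grpc"]
#     # -> BiddingStrategyServiceGrpcTransport; instantiating it requires real
#     #    credential/channel arguments, which are omitted here.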
| {
"content_hash": "0c73eba3a560dd5af9b304628b5ef2ec",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 65,
"avg_line_length": 29.5625,
"alnum_prop": 0.7970401691331924,
"repo_name": "googleads/google-ads-python",
"id": "80d3172dc8a2deb19ae47923a09ed44e4c30d63f",
"size": "1073",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v12/services/services/bidding_strategy_service/transports/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import copy
import logging
import os
import shutil
import threading
from collections import defaultdict
from twitter.common.collections import maybe_list
from pants.backend.jvm.ivy_utils import IvyUtils
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.base.cache_manager import VersionedTargetSet
from pants.base.exceptions import TaskError
from pants.base.fingerprint_strategy import DefaultFingerprintHashingMixin, FingerprintStrategy
from pants.ivy.bootstrapper import Bootstrapper
from pants.java.util import execute_runner
from pants.util.dirutil import safe_mkdir
logger = logging.getLogger(__name__)
class IvyResolveFingerprintStrategy(DefaultFingerprintHashingMixin, FingerprintStrategy):
def compute_fingerprint(self, target):
if isinstance(target, JarLibrary):
return target.payload.fingerprint()
if isinstance(target, JvmTarget):
if target.payload.excludes or target.payload.configurations:
return target.payload.fingerprint(field_keys=('excludes', 'configurations'))
return None
class IvyTaskMixin(object):
@classmethod
def register_options(cls, register):
super(IvyTaskMixin, cls).register_options(register)
register('--jvm-options', action='append', metavar='<option>...',
help='Run Ivy with these extra jvm options.')
# Protect writes to the global map of jar path -> symlinks to that jar.
symlink_map_lock = threading.Lock()
def ivy_resolve(self,
targets,
executor=None,
silent=False,
workunit_name=None,
confs=None,
custom_args=None):
"""Populates the product 'ivy_resolve_symlink_map' from the specified targets."""
if not targets:
return ([], set())
# NOTE: Always pass all the targets to exec_ivy, as they're used to calculate the name of
# the generated module, which in turn determines the location of the XML report file
# ivy generates. We recompute this name from targets later in order to find that file.
# TODO: This is fragile. Refactor so that we're not computing the name twice.
ivy = Bootstrapper.default_ivy(bootstrap_workunit_factory=self.context.new_workunit)
ivy_workdir = os.path.join(self.context.options.for_global_scope().pants_workdir, 'ivy')
fingerprint_strategy = IvyResolveFingerprintStrategy()
with self.invalidated(targets,
invalidate_dependents=False,
silent=silent,
fingerprint_strategy=fingerprint_strategy) as invalidation_check:
if not invalidation_check.all_vts:
return ([], set())
global_vts = VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)
target_workdir = os.path.join(ivy_workdir, global_vts.cache_key.hash)
target_classpath_file = os.path.join(target_workdir, 'classpath')
raw_target_classpath_file = target_classpath_file + '.raw'
raw_target_classpath_file_tmp = raw_target_classpath_file + '.tmp'
# A common dir for symlinks into the ivy2 cache. This ensures that paths to jars
# in artifact-cached analysis files are consistent across systems.
# Note that we have one global, well-known symlink dir, again so that paths are
# consistent across builds.
symlink_dir = os.path.join(ivy_workdir, 'jars')
# Note that it's possible for all targets to be valid but for no classpath file to exist at
# target_classpath_file, e.g., if we previously built a superset of targets.
if invalidation_check.invalid_vts or not os.path.exists(raw_target_classpath_file):
args = ['-cachepath', raw_target_classpath_file_tmp] + (custom_args if custom_args else [])
self.exec_ivy(
target_workdir=target_workdir,
targets=global_vts.targets,
args=args,
executor=executor,
ivy=ivy,
workunit_name=workunit_name,
confs=confs)
if not os.path.exists(raw_target_classpath_file_tmp):
raise TaskError('Ivy failed to create classpath file at %s'
% raw_target_classpath_file_tmp)
shutil.move(raw_target_classpath_file_tmp, raw_target_classpath_file)
        logger.debug('Copied ivy classpath file to {dest}'.format(dest=raw_target_classpath_file))
if self.artifact_cache_writes_enabled():
self.update_artifact_cache([(global_vts, [raw_target_classpath_file])])
# Make our actual classpath be symlinks, so that the paths are uniform across systems.
# Note that we must do this even if we read the raw_target_classpath_file from the artifact
# cache. If we cache the target_classpath_file we won't know how to create the symlinks.
symlink_map = IvyUtils.symlink_cachepath(ivy.ivy_cache_dir, raw_target_classpath_file,
symlink_dir, target_classpath_file)
with IvyTaskMixin.symlink_map_lock:
products = self.context.products
all_symlinks_map = products.get_data('ivy_resolve_symlink_map') or defaultdict(list)
for path, symlink in symlink_map.items():
all_symlinks_map[os.path.realpath(path)].append(symlink)
products.safe_create_data('ivy_resolve_symlink_map',
lambda: all_symlinks_map)
with IvyUtils.cachepath(target_classpath_file) as classpath:
stripped_classpath = [path.strip() for path in classpath]
return (stripped_classpath, global_vts.targets)
def mapjars(self, genmap, target, executor, jars=None):
"""Resolves jars for the target and stores their locations in genmap.
:param genmap: The jar_dependencies ProductMapping entry for the required products.
:param target: The target whose jar dependencies are being retrieved.
    :param jars: If specified, resolves the given jars rather than the jar
      dependencies declared by the target.
:type jars: List of :class:`pants.backend.jvm.targets.jar_dependency.JarDependency` (jar())
objects.
"""
mapdir = os.path.join(self.workdir, 'mapped-jars', target.id)
safe_mkdir(mapdir, clean=True)
ivyargs = [
'-retrieve', '%s/[organisation]/[artifact]/[conf]/'
'[organisation]-[artifact]-[revision](-[classifier]).[ext]' % mapdir,
'-symlink',
]
confs = maybe_list(target.payload.get_field_value('configurations') or [])
self.exec_ivy(mapdir,
[target],
executor=executor,
args=ivyargs,
confs=confs,
ivy=Bootstrapper.default_ivy(),
workunit_name='map-jars',
jars=jars)
for org in os.listdir(mapdir):
orgdir = os.path.join(mapdir, org)
if os.path.isdir(orgdir):
for name in os.listdir(orgdir):
artifactdir = os.path.join(orgdir, name)
if os.path.isdir(artifactdir):
for conf in os.listdir(artifactdir):
confdir = os.path.join(artifactdir, conf)
for f in os.listdir(confdir):
# TODO(John Sirois): kill the org and (org, name) exclude mappings in favor of a
# conf whitelist
genmap.add(org, confdir).append(f)
genmap.add((org, name), confdir).append(f)
genmap.add(target, confdir).append(f)
genmap.add((target, conf), confdir).append(f)
genmap.add((org, name, conf), confdir).append(f)
def exec_ivy(self,
target_workdir,
targets,
args,
executor=None,
confs=None,
ivy=None,
workunit_name='ivy',
jars=None):
ivy_jvm_options = copy.copy(self.get_options().jvm_options)
# Disable cache in File.getCanonicalPath(), makes Ivy work with -symlink option properly on ng.
ivy_jvm_options.append('-Dsun.io.useCanonCaches=false')
ivy = ivy or Bootstrapper.default_ivy()
ivyxml = os.path.join(target_workdir, 'ivy.xml')
if not jars:
jars, excludes = IvyUtils.calculate_classpath(targets)
else:
excludes = set()
ivy_args = ['-ivy', ivyxml]
confs_to_resolve = confs or ['default']
ivy_args.append('-confs')
ivy_args.extend(confs_to_resolve)
ivy_args.extend(args)
with IvyUtils.ivy_lock:
IvyUtils.generate_ivy(targets, jars, excludes, ivyxml, confs_to_resolve)
runner = ivy.runner(jvm_options=ivy_jvm_options, args=ivy_args, executor=executor)
try:
result = execute_runner(runner, workunit_factory=self.context.new_workunit,
workunit_name=workunit_name)
if result != 0:
raise TaskError('Ivy returned %d' % result)
except runner.executor.Error as e:
raise TaskError(e)
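# Editor's note: a hypothetical sketch of how a concrete task might use this mixin;
# it is not part of the original file, and the MyResolveTask / Task names are
# illustrative only (the real base class and option plumbing come from pants).
#
#   class MyResolveTask(IvyTaskMixin, Task):
#     def execute(self):
#       classpath, resolved_targets = self.ivy_resolve(self.context.targets(),
#                                                      confs=['default'])
#       # `classpath` holds symlinked jar paths; the symlink map itself is
#       # published under the 'ivy_resolve_symlink_map' product.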
| {
"content_hash": "20fa0e337943bc598f8cac0db1f66f26",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 99,
"avg_line_length": 43.29665071770335,
"alnum_prop": 0.653442369322577,
"repo_name": "tejal29/pants",
"id": "48dda45c87025d79bc319cacc0b27276f5511e39",
"size": "9196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/jvm/tasks/ivy_task_mixin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10977"
},
{
"name": "GAP",
"bytes": "4810"
},
{
"name": "HTML",
"bytes": "75563"
},
{
"name": "Java",
"bytes": "47798"
},
{
"name": "JavaScript",
"bytes": "10157"
},
{
"name": "Protocol Buffer",
"bytes": "5348"
},
{
"name": "Python",
"bytes": "2364916"
},
{
"name": "Scala",
"bytes": "5556"
},
{
"name": "Shell",
"bytes": "39930"
},
{
"name": "Thrift",
"bytes": "1841"
},
{
"name": "XML",
"bytes": "8658"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from uuid import uuid4
import six
import logging
from django.utils.translation import ugettext_lazy as _
from rest_framework.serializers import ValidationError
from six.moves.urllib.parse import urlencode
from sentry.integrations import (
IntegrationInstallation,
IntegrationFeatures,
IntegrationProvider,
IntegrationMetadata,
FeatureDescription,
)
from sentry import options
from sentry.constants import ObjectStatus
from sentry.pipeline import NestedPipelineView
from sentry.identity.pipeline import IdentityProviderPipeline
from sentry.utils.http import absolute_uri
from sentry.models import (
Organization,
Integration,
Project,
ProjectKey,
User,
SentryAppInstallation,
SentryAppInstallationForProvider,
)
from sentry.utils.compat import map
from sentry.shared_integrations.exceptions import IntegrationError, ApiError
from sentry.mediators.sentry_apps import InternalCreator
from .client import VercelClient
logger = logging.getLogger("sentry.integrations.vercel")
DESCRIPTION = _(
"""
Vercel is an all-in-one platform with Global CDN supporting static & JAMstack deployment and Serverless Functions.
"""
)
FEATURES = [
FeatureDescription(
"""
Connect your Sentry and Vercel projects to automatically upload source maps and notify Sentry of new releases being deployed.
""",
IntegrationFeatures.DEPLOYMENT,
)
]
INSTALL_NOTICE_TEXT = _(
"Visit the Vercel Marketplace to install this integration. After installing the"
" Sentry integration, you'll be redirected back to Sentry to finish syncing Vercel and Sentry projects."
)
external_install = {
"url": u"https://vercel.com/integrations/%s/add" % options.get("vercel.integration-slug"),
"buttonText": _("Vercel Marketplace"),
"noticeText": _(INSTALL_NOTICE_TEXT),
}
configure_integration = {"title": _("Connect Your Projects")}
create_project_instruction = _("Don't have a project yet? Click [here]({}) to create one.")
install_source_code_integration = _(
"Install a [source code integration]({}) and configure your repositories."
)
disable_dialog = {
"actionText": _("Visit Vercel"),
"body": _(
"In order to uninstall this integration, you must go"
" to Vercel and uninstall there by clicking 'Remove Configuration'."
),
}
metadata = IntegrationMetadata(
description=DESCRIPTION.strip(),
features=FEATURES,
author="The Sentry Team",
noun=_("Installation"),
issue_url="https://github.com/getsentry/sentry/issues/new?title=Vercel%20Integration:%20&labels=Component%3A%20Integrations",
source_url="https://github.com/getsentry/sentry/tree/master/src/sentry/integrations/vercel",
aspects={
"externalInstall": external_install,
"configure_integration": configure_integration,
"disable_dialog": disable_dialog,
},
)
internal_integration_overview = (
"This internal integration was auto-generated during the installation process of your Vercel"
" integration. It is needed to provide the token used to create a release. If this integration is "
"deleted, your Vercel integration will stop working!"
)
class VercelIntegration(IntegrationInstallation):
@property
def metadata(self):
return self.model.metadata
def get_dynamic_display_information(self):
organization = Organization.objects.get_from_cache(id=self.organization_id)
source_code_link = absolute_uri(
u"/settings/%s/integrations/?%s"
% (organization.slug, urlencode({"category": "source code management"}))
)
add_project_link = absolute_uri(u"/organizations/%s/projects/new/" % (organization.slug))
return {
"configure_integration": {
"instructions": [
create_project_instruction.format(add_project_link),
install_source_code_integration.format(source_code_link),
]
},
"integration_detail": {"uninstallationUrl": self.get_manage_url()},
}
def get_manage_url(self):
slug = self.get_slug()
configuration_id = self.get_configuration_id()
if configuration_id:
if self.metadata["installation_type"] == "team":
dashboard_url = u"https://vercel.com/dashboard/%s/" % slug
else:
dashboard_url = "https://vercel.com/dashboard/"
return u"%sintegrations/%s" % (dashboard_url, configuration_id)
return None
def get_client(self):
access_token = self.metadata["access_token"]
if self.metadata["installation_type"] == "team":
return VercelClient(access_token, self.model.external_id)
return VercelClient(access_token)
# note this could return a different integration if the user has multiple
# installations with the same organization
def get_configuration_id(self):
for configuration_id, data in self.metadata["configurations"].items():
if data["organization_id"] == self.organization_id:
return configuration_id
logger.error(
"could not find matching org",
extra={"organization_id": self.organization_id, "integration_id": self.model.id},
)
return None
def get_slug(self):
client = self.get_client()
if self.metadata["installation_type"] == "team":
team = client.get_team()
return team["slug"]
else:
user = client.get_user()
return user["username"]
def get_organization_config(self):
vercel_client = self.get_client()
# TODO: add try/catch if we get API failure
slug = self.get_slug()
base_url = u"https://vercel.com/%s" % slug
vercel_projects = [
{"value": p["id"], "label": p["name"], "url": u"%s/%s" % (base_url, p["name"])}
for p in vercel_client.get_projects()
]
proj_fields = ["id", "platform", "name", "slug"]
sentry_projects = map(
lambda proj: {key: proj[key] for key in proj_fields},
(
Project.objects.filter(
organization_id=self.organization_id, status=ObjectStatus.VISIBLE
)
.order_by("slug")
.values(*proj_fields)
),
)
fields = [
{
"name": "project_mappings",
"type": "project_mapper",
"mappedDropdown": {
"items": vercel_projects,
"placeholder": _("Vercel project..."),
},
"sentryProjects": sentry_projects,
"nextButton": {
"allowedDomain": "https://vercel.com",
"description": _(
"Link your Sentry projects to complete your installation on Vercel"
),
"text": _("Complete on Vercel"),
},
"iconType": "vercel",
}
]
return fields
def update_organization_config(self, data):
# data = {"project_mappings": [[sentry_project_id, vercel_project_id]]}
vercel_client = self.get_client()
config = self.org_integration.config
try:
new_mappings = data["project_mappings"]
except KeyError:
raise ValidationError("Failed to update configuration.")
old_mappings = config.get("project_mappings") or []
for mapping in new_mappings:
# skip any mappings that already exist
if mapping in old_mappings:
continue
[sentry_project_id, vercel_project_id] = mapping
sentry_project = Project.objects.get(id=sentry_project_id)
enabled_dsn = ProjectKey.get_default(project=sentry_project)
if not enabled_dsn:
raise ValidationError(
{"project_mappings": ["You must have an enabled DSN to continue!"]}
)
source_code_provider = self.get_source_code_provider(vercel_client, vercel_project_id)
if not source_code_provider:
raise ValidationError(
{
"project_mappings": [
"You must connect your Vercel project to a Git repository to continue!"
]
}
)
sentry_project_dsn = enabled_dsn.get_dsn(public=True)
uuid = uuid4().hex
sentry_app_installation = SentryAppInstallationForProvider.objects.get(
organization=sentry_project.organization.id, provider="vercel"
)
sentry_auth_token = sentry_app_installation.get_token(
self.organization_id, provider="vercel"
)
secret_names = [
"SENTRY_ORG_%s" % uuid,
"SENTRY_PROJECT_%s" % uuid,
"NEXT_PUBLIC_SENTRY_DSN_%s" % uuid,
"SENTRY_AUTH_TOKEN_%s" % uuid,
]
values = [
sentry_project.organization.slug,
sentry_project.slug,
sentry_project_dsn,
sentry_auth_token,
]
env_var_names = [
"SENTRY_ORG",
"SENTRY_PROJECT",
"NEXT_PUBLIC_SENTRY_DSN",
"SENTRY_AUTH_TOKEN",
"VERCEL_%s_COMMIT_SHA" % source_code_provider.upper(),
]
secrets = []
for name, val in zip(secret_names, values):
secrets.append(self.create_secret(vercel_client, vercel_project_id, name, val))
secrets.append("")
for secret, env_var in zip(secrets, env_var_names):
self.create_env_var(vercel_client, vercel_project_id, env_var, secret)
config.update(data)
self.org_integration.update(config=config)
def get_source_code_provider(self, client, vercel_project_id):
try:
return client.get_source_code_provider(vercel_project_id)
except KeyError:
return None
def get_env_vars(self, client, vercel_project_id):
return client.get_env_vars(vercel_project_id)
def env_var_already_exists(self, client, vercel_project_id, name):
return any(
[
env_var
for env_var in self.get_env_vars(client, vercel_project_id)["envs"]
if env_var["key"] == name
]
)
def create_secret(self, client, vercel_project_id, name, value):
return client.create_secret(vercel_project_id, name, value)
def create_env_var(self, client, vercel_project_id, key, value):
if not self.env_var_already_exists(client, vercel_project_id, key):
return client.create_env_variable(vercel_project_id, key, value)
self.update_env_variable(client, vercel_project_id, key, value)
def update_env_variable(self, client, vercel_project_id, key, value):
return client.update_env_variable(vercel_project_id, key, value)
class VercelIntegrationProvider(IntegrationProvider):
key = "vercel"
name = "Vercel"
can_add = False
can_disable = True
metadata = metadata
integration_cls = VercelIntegration
features = frozenset([IntegrationFeatures.DEPLOYMENT])
oauth_redirect_url = "/extensions/vercel/configure/"
def get_pipeline_views(self):
identity_pipeline_config = {"redirect_url": absolute_uri(self.oauth_redirect_url)}
identity_pipeline_view = NestedPipelineView(
bind_key="identity",
provider_key=self.key,
pipeline_cls=IdentityProviderPipeline,
config=identity_pipeline_config,
)
return [identity_pipeline_view]
def get_configuration_metadata(self, external_id):
# If a vercel team or user was already installed on another sentry org
# we want to make sure we don't overwrite the existing configurations. We
# keep all the configurations so that if one of them is deleted from vercel's
# side, the other sentry org will still have a working vercel integration.
try:
integration = Integration.objects.get(external_id=external_id, provider=self.key)
except Integration.DoesNotExist:
# first time setting up vercel team/user
return {}
return integration.metadata["configurations"]
def build_integration(self, state):
data = state["identity"]["data"]
access_token = data["access_token"]
team_id = data.get("team_id")
client = VercelClient(access_token, team_id)
if team_id:
external_id = team_id
installation_type = "team"
team = client.get_team()
name = team["name"]
else:
external_id = data["user_id"]
installation_type = "user"
user = client.get_user()
name = user.get("name") or user["username"]
try:
webhook = client.create_deploy_webhook()
except ApiError as err:
logger.info(
"vercel.create_webhook.failed",
extra={"error": six.text_type(err), "external_id": external_id},
)
try:
details = list(err.json["messages"][0].values()).pop()
except Exception:
details = "Unknown Error"
message = u"Could not create deployment webhook in Vercel: {}".format(details)
raise IntegrationError(message)
configurations = self.get_configuration_metadata(external_id)
integration = {
"name": name,
"external_id": external_id,
"metadata": {
"access_token": access_token,
"installation_id": data["installation_id"],
"installation_type": installation_type,
"webhook_id": webhook["id"],
"configurations": configurations,
},
"post_install_data": {"user_id": state["user_id"]},
}
return integration
def post_install(self, integration, organization, extra=None):
# add new configuration information to metadata
configurations = integration.metadata.get("configurations") or {}
configurations[integration.metadata["installation_id"]] = {
"access_token": integration.metadata["access_token"],
"webhook_id": integration.metadata["webhook_id"],
"organization_id": organization.id,
}
integration.metadata["configurations"] = configurations
integration.save()
# check if we have an installation already
if SentryAppInstallationForProvider.objects.filter(
organization=organization, provider="vercel"
).exists():
logger.info(
"vercel.post_install.installation_exists",
extra={"organization_id": organization.id},
)
return
user = User.objects.get(id=extra.get("user_id"))
data = {
"name": "Vercel Internal Integration",
"author": "Auto-generated by Sentry",
"organization": organization,
"overview": internal_integration_overview.strip(),
"user": user,
"scopes": ["project:releases", "project:read", "project:write"],
}
# create the internal integration and link it to the join table
sentry_app = InternalCreator.run(**data)
sentry_app_installation = SentryAppInstallation.objects.get(sentry_app=sentry_app)
SentryAppInstallationForProvider.objects.create(
sentry_app_installation=sentry_app_installation,
organization=organization,
provider="vercel",
)
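# Editor's note: illustrative sketch only.  update_organization_config() above
# expects the field produced by get_organization_config(), i.e. a payload shaped
# like the following (the IDs are placeholders):
#
#     {
#         "project_mappings": [
#             [sentry_project_id, "vercel_project_id"],
#         ]
#     }
#
# Each newly added mapping creates the SENTRY_* and NEXT_PUBLIC_SENTRY_DSN secrets
# and environment variables on the linked Vercel project.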
| {
"content_hash": "4da4eb00fcda6ef6a20bc72e2b299b92",
"timestamp": "",
"source": "github",
"line_count": 430,
"max_line_length": 133,
"avg_line_length": 37.30930232558139,
"alnum_prop": 0.5979554946082404,
"repo_name": "beeftornado/sentry",
"id": "7eefb4912892e27bbe3f5bcb144ae5ac1408e6a5",
"size": "16043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/integrations/vercel/integration.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
} |
"""Module containing non-deprecated functions borrowed from Numeric.
"""
import functools
import types
import warnings
import numpy as np
from . import multiarray as mu
from . import overrides
from . import umath as um
from . import numerictypes as nt
from .multiarray import asarray, array, asanyarray, concatenate
from . import _methods
_dt_ = nt.sctype2char
# functions that are methods
__all__ = [
'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
'ravel', 'repeat', 'reshape', 'resize', 'round_',
'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
]
_gentype = types.GeneratorType
# save away Python sum
_sum_ = sum
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj), method)(*args, **kwds)
if wrap:
if not isinstance(result, mu.ndarray):
result = asarray(result)
result = wrap(result)
return result
def _wrapfunc(obj, method, *args, **kwds):
bound = getattr(obj, method, None)
if bound is None:
return _wrapit(obj, method, *args, **kwds)
try:
return bound(*args, **kwds)
except TypeError:
# A TypeError occurs if the object does have such a method in its
# class, but its signature is not identical to that of NumPy's. This
# situation has occurred in the case of a downstream library like
# 'pandas'.
#
# Call _wrapit from within the except clause to ensure a potential
# exception has a traceback chain.
return _wrapit(obj, method, *args, **kwds)
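# Minimal sketch (not part of the original module) of how _wrapfunc dispatches:
# a plain list has no bound `take` method, so the _wrapit/asarray fallback is
# used, while an ndarray's own method is called directly. `_demo_wrapfunc` is a
# hypothetical helper added here only for illustration.
def _demo_wrapfunc():
    lst = [4, 3, 5]
    # list -> no `.take` attribute -> _wrapit converts with asarray first
    assert np.array_equal(_wrapfunc(lst, 'take', [0, 2]), np.array([4, 5]))
    # ndarray -> its bound `.take` method is used directly
    assert np.array_equal(_wrapfunc(np.array(lst), 'take', [0, 2]), np.array([4, 5]))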
def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs):
passkwargs = {k: v for k, v in kwargs.items()
if v is not np._NoValue}
if type(obj) is not mu.ndarray:
try:
reduction = getattr(obj, method)
except AttributeError:
pass
else:
# This branch is needed for reductions like any which don't
# support a dtype.
if dtype is not None:
return reduction(axis=axis, dtype=dtype, out=out, **passkwargs)
else:
return reduction(axis=axis, out=out, **passkwargs)
return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
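# Minimal sketch (not part of the original module): for a plain ndarray,
# _wrapreduction goes straight to ufunc.reduce; the getattr branch only matters
# for array-likes (e.g. pandas objects) that define their own reduction method.
# `_demo_wrapreduction` is a hypothetical helper for illustration only.
def _demo_wrapreduction():
    a = np.arange(6).reshape(2, 3)
    out = _wrapreduction(a, np.add, 'sum', 0, None, None)
    assert np.array_equal(out, np.array([3, 5, 7]))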
def _take_dispatcher(a, indices, axis=None, out=None, mode=None):
return (a, out)
@array_function_dispatch(_take_dispatcher)
def take(a, indices, axis=None, out=None, mode='raise'):
"""
Take elements from an array along an axis.
When axis is not None, this function does the same thing as "fancy"
indexing (indexing arrays using arrays); however, it can be easier to use
if you need elements along a given axis. A call such as
``np.take(arr, indices, axis=3)`` is equivalent to
``arr[:,:,:,indices,...]``.
Explained without fancy indexing, this is equivalent to the following use
of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of
indices::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
Nj = indices.shape
for ii in ndindex(Ni):
for jj in ndindex(Nj):
for kk in ndindex(Nk):
out[ii + jj + kk] = a[ii + (indices[jj],) + kk]
Parameters
----------
a : array_like (Ni..., M, Nk...)
The source array.
indices : array_like (Nj...)
The indices of the values to extract.
.. versionadded:: 1.8.0
Also allow scalars for indices.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : ndarray, optional (Ni..., Nj..., Nk...)
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype. Note that `out` is always
buffered if `mode='raise'`; use other modes for better performance.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
out : ndarray (Ni..., Nj..., Nk...)
The returned array has the same type as `a`.
See Also
--------
compress : Take elements using a boolean mask
ndarray.take : equivalent method
take_along_axis : Take elements by matching the array and the index arrays
Notes
-----
By eliminating the inner loop in the description above, and using `s_` to
build simple slice objects, `take` can be expressed in terms of applying
fancy indexing to each 1-d slice::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
for ii in ndindex(Ni):
for kk in ndindex(Nk):
out[ii + s_[...,] + kk] = a[ii + s_[:,] + kk][indices]
For this reason, it is equivalent to (but faster than) the following use
of `apply_along_axis`::
out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a)
Examples
--------
>>> a = [4, 3, 5, 7, 6, 8]
>>> indices = [0, 1, 4]
>>> np.take(a, indices)
array([4, 3, 6])
In this example if `a` is an ndarray, "fancy" indexing can be used.
>>> a = np.array(a)
>>> a[indices]
array([4, 3, 6])
If `indices` is not one dimensional, the output also has these dimensions.
>>> np.take(a, [[0, 1], [2, 3]])
array([[4, 3],
[5, 7]])
"""
return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
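# Hedged example (not part of the original module) of the out-of-bounds modes
# described in the docstring above: 'wrap' reduces the index modulo the axis
# length, 'clip' pins it to the last valid index. `_demo_take_modes` is
# illustrative only.
def _demo_take_modes():
    a = np.array([4, 3, 5, 7])
    assert np.array_equal(np.take(a, [5], mode='wrap'), np.array([3]))  # 5 % 4 == 1
    assert np.array_equal(np.take(a, [5], mode='clip'), np.array([7]))  # clipped to index 3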
def _reshape_dispatcher(a, newshape, order=None):
return (a,)
# not deprecated --- copy if necessary, view otherwise
@array_function_dispatch(_reshape_dispatcher)
def reshape(a, newshape, order='C'):
"""
Gives a new shape to an array without changing its data.
Parameters
----------
a : array_like
Array to be reshaped.
newshape : int or tuple of ints
The new shape should be compatible with the original shape. If
an integer, then the result will be a 1-D array of that length.
One shape dimension can be -1. In this case, the value is
inferred from the length of the array and remaining dimensions.
order : {'C', 'F', 'A'}, optional
Read the elements of `a` using this index order, and place the
elements into the reshaped array using this index order. 'C'
means to read / write the elements using C-like index order,
with the last axis index changing fastest, back to the first
axis index changing slowest. 'F' means to read / write the
elements using Fortran-like index order, with the first index
changing fastest, and the last index changing slowest. Note that
the 'C' and 'F' options take no account of the memory layout of
the underlying array, and only refer to the order of indexing.
'A' means to read / write the elements in Fortran-like index
order if `a` is Fortran *contiguous* in memory, C-like order
otherwise.
Returns
-------
reshaped_array : ndarray
This will be a new view object if possible; otherwise, it will
be a copy. Note there is no guarantee of the *memory layout* (C- or
Fortran- contiguous) of the returned array.
See Also
--------
ndarray.reshape : Equivalent method.
Notes
-----
It is not always possible to change the shape of an array without
copying the data. If you want an error to be raised when the data is copied,
you should assign the new shape to the shape attribute of the array::
>>> a = np.zeros((10, 2))
# A transpose makes the array non-contiguous
>>> b = a.T
# Taking a view makes it possible to modify the shape without modifying
# the initial object.
>>> c = b.view()
>>> c.shape = (20)
Traceback (most recent call last):
...
AttributeError: Incompatible shape for in-place modification. Use
`.reshape()` to make a copy with the desired shape.
The `order` keyword gives the index ordering both for *fetching* the values
from `a`, and then *placing* the values into the output array.
For example, let's say you have an array:
>>> a = np.arange(6).reshape((3, 2))
>>> a
array([[0, 1],
[2, 3],
[4, 5]])
You can think of reshaping as first raveling the array (using the given
index order), then inserting the elements from the raveled array into the
new array using the same kind of index ordering as was used for the
raveling.
>>> np.reshape(a, (2, 3)) # C-like index ordering
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering
array([[0, 4, 3],
[2, 1, 5]])
>>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F')
array([[0, 4, 3],
[2, 1, 5]])
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.reshape(a, 6)
array([1, 2, 3, 4, 5, 6])
>>> np.reshape(a, 6, order='F')
array([1, 4, 2, 5, 3, 6])
>>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2
array([[1, 2],
[3, 4],
[5, 6]])
"""
return _wrapfunc(a, 'reshape', newshape, order=order)
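# Small sketch (not part of the original module) of the view-vs-copy note above:
# for a contiguous input, reshape returns a view that shares memory with the
# original array. `_demo_reshape_is_view` is illustrative only.
def _demo_reshape_is_view():
    a = np.arange(6)
    b = np.reshape(a, (2, 3))
    b[0, 0] = 99          # writes through to `a` because `b` is a view
    assert a[0] == 99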
def _choose_dispatcher(a, choices, out=None, mode=None):
yield a
yield from choices
yield out
@array_function_dispatch(_choose_dispatcher)
def choose(a, choices, out=None, mode='raise'):
"""
Construct an array from an index array and a list of arrays to choose from.
First of all, if confused or uncertain, definitely look at the Examples -
in its full generality, this function is less simple than it might
seem from the following code description (below ndi =
`numpy.lib.index_tricks`):
``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``.
But this omits some subtleties. Here is a fully general summary:
Given an "index" array (`a`) of integers and a sequence of ``n`` arrays
(`choices`), `a` and each choice array are first broadcast, as necessary,
to arrays of a common shape; calling these *Ba* and *Bchoices[i], i =
0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape``
for each ``i``. Then, a new array with shape ``Ba.shape`` is created as
follows:
* if ``mode='raise'`` (the default), then, first of all, each element of
``a`` (and thus ``Ba``) must be in the range ``[0, n-1]``; now, suppose
that ``i`` (in that range) is the value at the ``(j0, j1, ..., jm)``
position in ``Ba`` - then the value at the same position in the new array
is the value in ``Bchoices[i]`` at that same position;
* if ``mode='wrap'``, values in `a` (and thus `Ba`) may be any (signed)
integer; modular arithmetic is used to map integers outside the range
`[0, n-1]` back into that range; and then the new array is constructed
as above;
* if ``mode='clip'``, values in `a` (and thus ``Ba``) may be any (signed)
integer; negative integers are mapped to 0; values greater than ``n-1``
are mapped to ``n-1``; and then the new array is constructed as above.
Parameters
----------
a : int array
This array must contain integers in ``[0, n-1]``, where ``n`` is the
number of choices, unless ``mode=wrap`` or ``mode=clip``, in which
cases any integers are permissible.
choices : sequence of arrays
Choice arrays. `a` and all of the choices must be broadcastable to the
same shape. If `choices` is itself an array (not recommended), then
its outermost dimension (i.e., the one corresponding to
``choices.shape[0]``) is taken as defining the "sequence".
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype. Note that `out` is always
buffered if ``mode='raise'``; use other modes for better performance.
mode : {'raise' (default), 'wrap', 'clip'}, optional
Specifies how indices outside ``[0, n-1]`` will be treated:
* 'raise' : an exception is raised
* 'wrap' : value becomes value mod ``n``
* 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1
Returns
-------
merged_array : array
The merged result.
Raises
------
ValueError: shape mismatch
If `a` and each choice array are not all broadcastable to the same
shape.
See Also
--------
ndarray.choose : equivalent method
numpy.take_along_axis : Preferable if `choices` is an array
Notes
-----
To reduce the chance of misinterpretation, even though the following
"abuse" is nominally supported, `choices` should neither be, nor be
thought of as, a single array, i.e., the outermost sequence-like container
should be either a list or a tuple.
Examples
--------
>>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
... [20, 21, 22, 23], [30, 31, 32, 33]]
>>> np.choose([2, 3, 1, 0], choices
... # the first element of the result will be the first element of the
... # third (2+1) "array" in choices, namely, 20; the second element
... # will be the second element of the fourth (3+1) choice array, i.e.,
... # 31, etc.
... )
array([20, 31, 12, 3])
>>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
array([20, 31, 12, 3])
>>> # because there are 4 choice arrays
>>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
array([20, 1, 12, 3])
>>> # i.e., 0
A couple examples illustrating how choose broadcasts:
>>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
>>> choices = [-10, 10]
>>> np.choose(a, choices)
array([[ 10, -10, 10],
[-10, 10, -10],
[ 10, -10, 10]])
>>> # With thanks to Anne Archibald
>>> a = np.array([0, 1]).reshape((2,1,1))
>>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
>>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
>>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
array([[[ 1, 1, 1, 1, 1],
[ 2, 2, 2, 2, 2],
[ 3, 3, 3, 3, 3]],
[[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5]]])
"""
return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
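# Hedged sketch (not part of the original module) of the identity quoted in the
# docstring, np.choose(a, c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)]),
# checked on a small broadcast-free case. `_demo_choose_identity` is illustrative only.
def _demo_choose_identity():
    a = np.array([[1, 0, 1], [0, 1, 0]])
    c = [np.zeros((2, 3), int), np.full((2, 3), 7)]
    expected = np.array([c[a[i]][i] for i in np.ndindex(a.shape)]).reshape(a.shape)
    assert np.array_equal(np.choose(a, c), expected)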
def _repeat_dispatcher(a, repeats, axis=None):
return (a,)
@array_function_dispatch(_repeat_dispatcher)
def repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
Parameters
----------
a : array_like
Input array.
repeats : int or array of ints
The number of repetitions for each element. `repeats` is broadcasted
to fit the shape of the given axis.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns
-------
repeated_array : ndarray
Output array which has the same shape as `a`, except along
the given axis.
See Also
--------
tile : Tile an array.
unique : Find the unique elements of an array.
Examples
--------
>>> np.repeat(3, 4)
array([3, 3, 3, 3])
>>> x = np.array([[1,2],[3,4]])
>>> np.repeat(x, 2)
array([1, 1, 2, 2, 3, 3, 4, 4])
>>> np.repeat(x, 3, axis=1)
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> np.repeat(x, [1, 2], axis=0)
array([[1, 2],
[3, 4],
[3, 4]])
"""
return _wrapfunc(a, 'repeat', repeats, axis=axis)
def _put_dispatcher(a, ind, v, mode=None):
return (a, ind, v)
@array_function_dispatch(_put_dispatcher)
def put(a, ind, v, mode='raise'):
"""
Replaces specified elements of an array with given values.
The indexing works on the flattened target array. `put` is roughly
equivalent to:
::
a.flat[ind] = v
Parameters
----------
a : ndarray
Target array.
ind : array_like
Target indices, interpreted as integers.
v : array_like
Values to place in `a` at target indices. If `v` is shorter than
`ind` it will be repeated as necessary.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers. In 'raise' mode,
if an exception occurs the target array may still be modified.
See Also
--------
putmask, place
put_along_axis : Put elements by matching the array and the index arrays
Examples
--------
>>> a = np.arange(5)
>>> np.put(a, [0, 2], [-44, -55])
>>> a
array([-44, 1, -55, 3, 4])
>>> a = np.arange(5)
>>> np.put(a, 22, -5, mode='clip')
>>> a
array([ 0, 1, 2, 3, -5])
"""
try:
put = a.put
except AttributeError as e:
raise TypeError("argument 1 must be numpy.ndarray, "
"not {name}".format(name=type(a).__name__)) from e
return put(ind, v, mode=mode)
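# Small example (not part of the original module) of the note that `v` is
# repeated when it is shorter than `ind`. `_demo_put_cycles_values` is
# illustrative only.
def _demo_put_cycles_values():
    a = np.zeros(5, dtype=int)
    np.put(a, [0, 1, 2, 3], [1, 2])   # [1, 2] is cycled over the four indices
    assert np.array_equal(a, np.array([1, 2, 1, 2, 0]))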
def _swapaxes_dispatcher(a, axis1, axis2):
return (a,)
@array_function_dispatch(_swapaxes_dispatcher)
def swapaxes(a, axis1, axis2):
"""
Interchange two axes of an array.
Parameters
----------
a : array_like
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is
returned; otherwise a new array is created. For earlier NumPy
versions a view of `a` is returned only if the order of the
axes is changed, otherwise the input array is returned.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> np.swapaxes(x,0,1)
array([[1],
[2],
[3]])
>>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.swapaxes(x,0,2)
array([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
"""
return _wrapfunc(a, 'swapaxes', axis1, axis2)
def _transpose_dispatcher(a, axes=None):
return (a,)
@array_function_dispatch(_transpose_dispatcher)
def transpose(a, axes=None):
"""
Reverse or permute the axes of an array; returns the modified array.
For an array a with two axes, transpose(a) gives the matrix transpose.
Refer to `numpy.ndarray.transpose` for full documentation.
Parameters
----------
a : array_like
Input array.
axes : tuple or list of ints, optional
If specified, it must be a tuple or list which contains a permutation of
[0,1,..,N-1] where N is the number of axes of a. The i'th axis of the
returned array will correspond to the axis numbered ``axes[i]`` of the
input. If not specified, defaults to ``range(a.ndim)[::-1]``, which
reverses the order of the axes.
Returns
-------
p : ndarray
`a` with its axes permuted. A view is returned whenever
possible.
See Also
--------
ndarray.transpose : Equivalent method
moveaxis
argsort
Notes
-----
Use `transpose(a, argsort(axes))` to invert the transposition of tensors
when using the `axes` keyword argument.
Transposing a 1-D array returns an unchanged view of the original array.
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.transpose(x)
array([[0, 2],
[1, 3]])
>>> x = np.ones((1, 2, 3))
>>> np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
>>> x = np.ones((2, 3, 4, 5))
>>> np.transpose(x).shape
(5, 4, 3, 2)
"""
return _wrapfunc(a, 'transpose', axes)
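# Hedged sketch (not part of the original module) of the note above: passing
# argsort(axes) to transpose undoes a permutation of the axes.
# `_demo_transpose_inverse` is illustrative only.
def _demo_transpose_inverse():
    x = np.ones((2, 3, 4))
    axes = (2, 0, 1)
    y = np.transpose(x, axes)                  # shape (4, 2, 3)
    assert np.transpose(y, np.argsort(axes)).shape == (2, 3, 4)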
def _partition_dispatcher(a, kth, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_partition_dispatcher)
def partition(a, kth, axis=-1, kind='introselect', order=None):
"""
Return a partitioned copy of an array.
Creates a copy of the array with its elements rearranged in such a
way that the value of the element in k-th position is in the
position it would be in a sorted array. All elements smaller than
the k-th element are moved before this element and all equal or
greater are moved behind it. The ordering of the elements in the two
partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to be sorted.
kth : int or sequence of ints
Element index to partition by. The element at the k-th position
will be in its final sorted position and all smaller elements
will be moved before it and all equal or greater elements behind
it. The order of all elements in the partitions is undefined. If
provided with a sequence of k-th values, all elements indexed by
them are partitioned into their sorted positions at once.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument
specifies which fields to compare first, second, etc. A single
field can be specified as a string. Not all fields need be
specified, but unspecified fields will still be used, in the
order in which they come up in the dtype, to break ties.
Returns
-------
partitioned_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.partition : Method to sort an array in-place.
argpartition : Indirect partition.
sort : Full sorting
Notes
-----
The various selection algorithms are characterized by their average
speed, worst case performance, work space size, and whether they are
stable. A stable sort keeps items with the same key in the same
relative order. The available algorithms have the following
properties:
================= ======= ============= ============ =======
kind speed worst case work space stable
================= ======= ============= ============ =======
'introselect' 1 O(n) 0 no
================= ======= ============= ============ =======
All the partition algorithms make temporary copies of the data when
partitioning along any but the last axis. Consequently,
partitioning along the last axis is faster and uses less space than
partitioning along any other axis.
The sort order for complex numbers is lexicographic. If both the
real and imaginary parts are non-nan then the order is determined by
the real parts except when they are equal, in which case the order
is determined by the imaginary parts.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> np.partition(a, 3)
array([2, 1, 3, 4])
>>> np.partition(a, (1, 3))
array([1, 2, 3, 4])
"""
if axis is None:
# flatten returns (1, N) for np.matrix, so always use the last axis
a = asanyarray(a).flatten()
axis = -1
else:
a = asanyarray(a).copy(order="K")
a.partition(kth, axis=axis, kind=kind, order=order)
return a
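# Small check (not part of the original module) of the guarantee described
# above: the k-th element equals the k-th element of a full sort, and everything
# before it is <= it. `_demo_partition_invariant` is illustrative only.
def _demo_partition_invariant():
    a = np.array([9, 1, 8, 2, 7, 3, 6, 4, 5, 0])
    k = 4
    p = np.partition(a, k)
    assert p[k] == np.sort(a)[k]
    assert (p[:k] <= p[k]).all() and (p[k:] >= p[k]).all()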
def _argpartition_dispatcher(a, kth, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_argpartition_dispatcher)
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
"""
Perform an indirect partition along the given axis using the
algorithm specified by the `kind` keyword. It returns an array of
indices of the same shape as `a` that index data along the given
axis in partitioned order.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to sort.
kth : int or sequence of ints
Element index to partition by. The k-th element will be in its
final sorted position and all smaller elements will be moved
before it and all larger elements behind it. The order of all
elements in the partitions is undefined. If provided with a
sequence of k-th values, it will partition all of them into their
sorted positions at once.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If
None, the flattened array is used.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'
order : str or list of str, optional
When `a` is an array with fields defined, this argument
specifies which fields to compare first, second, etc. A single
field can be specified as a string, and not all fields need be
specified, but unspecified fields will still be used, in the
order in which they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that partition `a` along the specified axis.
If `a` is one-dimensional, ``a[index_array]`` yields a partitioned `a`.
More generally, ``np.take_along_axis(a, index_array, axis=axis)`` always
yields the partitioned `a`, irrespective of dimensionality.
See Also
--------
partition : Describes partition algorithms used.
ndarray.partition : Inplace partition.
argsort : Full indirect sort.
take_along_axis : Apply ``index_array`` from argpartition
to an array as if by calling partition.
Notes
-----
See `partition` for notes on the different selection algorithms.
Examples
--------
One dimensional array:
>>> x = np.array([3, 4, 2, 1])
>>> x[np.argpartition(x, 3)]
array([2, 1, 3, 4])
>>> x[np.argpartition(x, (1, 3))]
array([1, 2, 3, 4])
>>> x = [3, 4, 2, 1]
>>> np.array(x)[np.argpartition(x, 3)]
array([2, 1, 3, 4])
Multi-dimensional array:
>>> x = np.array([[3, 4, 2], [1, 3, 1]])
>>> index_array = np.argpartition(x, kth=1, axis=-1)
>>> np.take_along_axis(x, index_array, axis=-1) # same as np.partition(x, kth=1)
array([[2, 3, 4],
[1, 1, 3]])
"""
return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
def _sort_dispatcher(a, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_sort_dispatcher)
def sort(a, axis=-1, kind=None, order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : array_like
Array to be sorted.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort or radix sort under the covers and, in general,
the actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
partition : Partial sort.
Notes
-----
The various sorting algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative
order. The four algorithms implemented in NumPy have the following
properties:
=========== ======= ============= ============ ========
kind speed worst case work space stable
=========== ======= ============= ============ ========
'quicksort' 1 O(n^2) 0 no
'heapsort' 3 O(n*log(n)) 0 no
'mergesort' 2 O(n*log(n)) ~n/2 yes
'timsort' 2 O(n*log(n)) ~n/2 yes
=========== ======= ============= ============ ========
.. note:: The datatype determines which of 'mergesort' or 'timsort'
is actually used, even if 'mergesort' is specified. User selection
at a finer scale is not currently available.
All the sort algorithms make temporary copies of the data when
sorting along any but the last axis. Consequently, sorting along
the last axis is faster and uses less space than sorting along
any other axis.
The sort order for complex numbers is lexicographic. If both the real
and imaginary parts are non-nan then the order is determined by the
real parts except when they are equal, in which case the order is
determined by the imaginary parts.
Previous to numpy 1.4.0 sorting real and complex arrays containing nan
values led to undefined behaviour. In numpy versions >= 1.4.0 nan
values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
where R is a non-nan real value. Complex values with the same nan
placements are sorted according to the non-nan part if it exists.
Non-nan values are sorted as before.
.. versionadded:: 1.12.0
quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
When sorting does not make enough progress it switches to
`heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
This implementation makes quicksort O(n*log(n)) in the worst case.
'stable' automatically chooses the best stable sorting algorithm
for the data type being sorted.
It, along with 'mergesort' is currently mapped to
`timsort <https://en.wikipedia.org/wiki/Timsort>`_
or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
depending on the data type.
API forward compatibility currently limits the
ability to select the implementation and it is hardwired for the different
data types.
.. versionadded:: 1.17.0
Timsort is added for better performance on already or nearly
sorted data. On random data timsort is almost identical to
mergesort. It is now used for stable sort while quicksort is still the
default sort if none is chosen. For timsort details, refer to
`CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an
O(n) sort instead of O(n log n).
.. versionchanged:: 1.18.0
NaT now sorts to the end of arrays for consistency with NaN.
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
>>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
... ('Galahad', 1.7, 38)]
>>> a = np.array(values, dtype=dtype) # create a structured array
>>> np.sort(a, order='height') # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.8999999999999999, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
Sort by age, then height if ages are equal:
>>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
('Arthur', 1.8, 41)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
"""
if axis is None:
# flatten returns (1, N) for np.matrix, so always use the last axis
a = asanyarray(a).flatten()
axis = -1
else:
a = asanyarray(a).copy(order="K")
a.sort(axis=axis, kind=kind, order=order)
return a
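# Hedged example (not part of the original module) of the stability property in
# the table above: with kind='stable', items with equal keys keep their original
# relative order. `_demo_sort_stability` is illustrative only.
def _demo_sort_stability():
    a = np.array([(1, 'b'), (0, 'a'), (1, 'a')],
                 dtype=[('key', int), ('tag', 'U1')])
    s = np.sort(a, order='key', kind='stable')
    assert list(s['tag']) == ['a', 'b', 'a']   # the two key==1 rows keep 'b' before 'a'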
def _argsort_dispatcher(a, axis=None, kind=None, order=None):
return (a,)
@array_function_dispatch(_argsort_dispatcher)
def argsort(a, axis=-1, kind=None, order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort under the covers and, in general, the
actual implementation will vary with data type. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified `axis`.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
More generally, ``np.take_along_axis(a, index_array, axis=axis)``
always yields the sorted `a`, irrespective of dimensionality.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
take_along_axis : Apply ``index_array`` from argsort
to an array as if by calling sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> ind = np.argsort(x, axis=0) # sorts along first axis (down)
>>> ind
array([[0, 1],
[1, 0]])
>>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)
array([[0, 2],
[2, 3]])
>>> ind = np.argsort(x, axis=1) # sorts along last axis (across)
>>> ind
array([[0, 1],
[0, 1]])
>>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)
array([[0, 3],
[2, 2]])
Indices of the sorted elements of an N-dimensional array:
>>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
>>> ind
(array([0, 1, 1, 0]), array([0, 0, 1, 1]))
>>> x[ind] # same as np.sort(x, axis=None)
array([0, 2, 2, 3])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1])
"""
return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
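# Small sketch (not part of the original module): applying argsort twice yields
# the rank of each element in the sorted order, a common idiom built on the
# behaviour documented above. `_demo_argsort_ranks` is illustrative only.
def _demo_argsort_ranks():
    x = np.array([30, 10, 20])
    ranks = np.argsort(np.argsort(x))
    assert np.array_equal(ranks, np.array([2, 0, 1]))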
def _argmax_dispatcher(a, axis=None, out=None):
return (a, out)
@array_function_dispatch(_argmax_dispatcher)
def argmax(a, axis=None, out=None):
"""
Returns the indices of the maximum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
take_along_axis : Apply ``np.expand_dims(index_array, axis)``
from argmax to an array as if by calling max.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15]])
>>> np.argmax(a)
5
>>> np.argmax(a, axis=0)
array([1, 1, 1])
>>> np.argmax(a, axis=1)
array([2, 2])
Indices of the maximal elements of an N-dimensional array:
>>> ind = np.unravel_index(np.argmax(a, axis=None), a.shape)
>>> ind
(1, 2)
>>> a[ind]
15
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0, 5, 2, 3, 4, 5])
>>> np.argmax(b) # Only the first occurrence is returned.
1
>>> x = np.array([[4,2,3], [1,0,3]])
>>> index_array = np.argmax(x, axis=-1)
>>> # Same as np.max(x, axis=-1, keepdims=True)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
array([[4],
[3]])
>>> # Same as np.max(x, axis=-1)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
array([4, 3])
"""
return _wrapfunc(a, 'argmax', axis=axis, out=out)
def _argmin_dispatcher(a, axis=None, out=None):
return (a, out)
@array_function_dispatch(_argmin_dispatcher)
def argmin(a, axis=None, out=None):
"""
Returns the indices of the minimum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmin, argmax
amin : The minimum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
take_along_axis : Apply ``np.expand_dims(index_array, axis)``
from argmin to an array as if by calling min.
Notes
-----
In case of multiple occurrences of the minimum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10, 11, 12],
[13, 14, 15]])
>>> np.argmin(a)
0
>>> np.argmin(a, axis=0)
array([0, 0, 0])
>>> np.argmin(a, axis=1)
array([0, 0])
Indices of the minimum elements of an N-dimensional array:
>>> ind = np.unravel_index(np.argmin(a, axis=None), a.shape)
>>> ind
(0, 0)
>>> a[ind]
10
>>> b = np.arange(6) + 10
>>> b[4] = 10
>>> b
array([10, 11, 12, 13, 10, 15])
>>> np.argmin(b) # Only the first occurrence is returned.
0
>>> x = np.array([[4,2,3], [1,0,3]])
>>> index_array = np.argmin(x, axis=-1)
>>> # Same as np.min(x, axis=-1, keepdims=True)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
array([[2],
[0]])
>>> # Same as np.min(x, axis=-1)
>>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
array([2, 0])
"""
return _wrapfunc(a, 'argmin', axis=axis, out=out)
def _searchsorted_dispatcher(a, v, side=None, sorter=None):
return (a, v, sorter)
@array_function_dispatch(_searchsorted_dispatcher)
def searchsorted(a, v, side='left', sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `a` such that, if the
corresponding elements in `v` were inserted before the indices, the
order of `a` would be preserved.
Assuming that `a` is sorted:
====== ============================
`side` returned index `i` satisfies
====== ============================
left ``a[i-1] < v <= a[i]``
right ``a[i-1] <= v < a[i]``
====== ============================
Parameters
----------
a : 1-D array_like
Input array. If `sorter` is None, then it must be sorted in
ascending order, otherwise `sorter` must be an array of indices
that sort it.
v : array_like
Values to insert into `a`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
.. versionadded:: 1.7.0
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
sort : Return a sorted copy of an array.
histogram : Produce histogram from 1-D data.
Notes
-----
Binary search is used to find the required insertion points.
As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
`nan` values. The enhanced sort order is documented in `sort`.
This function uses the same algorithm as the builtin python `bisect.bisect_left`
(``side='left'``) and `bisect.bisect_right` (``side='right'``) functions,
which is also vectorized in the `v` argument.
Examples
--------
>>> np.searchsorted([1,2,3,4,5], 3)
2
>>> np.searchsorted([1,2,3,4,5], 3, side='right')
3
>>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
array([0, 5, 1, 2])
"""
return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
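# Hedged example (not part of the original module) of the `sorter` argument
# described above: `a` itself need not be pre-sorted if indices that sort it are
# supplied. `_demo_searchsorted_sorter` is illustrative only.
def _demo_searchsorted_sorter():
    a = np.array([40, 10, 30, 20])
    order = np.argsort(a)                      # sorted view is [10, 20, 30, 40]
    assert np.searchsorted(a, 25, sorter=order) == 2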
def _resize_dispatcher(a, new_shape):
return (a,)
@array_function_dispatch(_resize_dispatcher)
def resize(a, new_shape):
"""
Return a new array with the specified shape.
If the new array is larger than the original array, then the new
array is filled with repeated copies of `a`. Note that this behavior
is different from a.resize(new_shape) which fills with zeros instead
of repeated copies of `a`.
Parameters
----------
a : array_like
Array to be resized.
new_shape : int or tuple of int
Shape of resized array.
Returns
-------
reshaped_array : ndarray
The new array is formed from the data in the old array, repeated
if necessary to fill out the required number of elements. The
data are repeated iterating over the array in C-order.
See Also
--------
np.reshape : Reshape an array without changing the total size.
np.pad : Enlarge and pad an array.
np.repeat : Repeat elements of an array.
ndarray.resize : resize an array in-place.
Notes
-----
When the total size of the array does not change `~numpy.reshape` should
be used. In most other cases either indexing (to reduce the size)
or padding (to increase the size) may be a more appropriate solution.
Warning: This functionality does **not** consider axes separately,
i.e. it does not apply interpolation/extrapolation.
It fills the return array with the required number of elements, iterating
over `a` in C-order, disregarding axes (and cycling back from the start if
the new shape is larger). This functionality is therefore not suitable to
resize images, or data where each axis represents a separate and distinct
entity.
Examples
--------
>>> a=np.array([[0,1],[2,3]])
>>> np.resize(a,(2,3))
array([[0, 1, 2],
[3, 0, 1]])
>>> np.resize(a,(1,4))
array([[0, 1, 2, 3]])
>>> np.resize(a,(2,4))
array([[0, 1, 2, 3],
[0, 1, 2, 3]])
"""
if isinstance(new_shape, (int, nt.integer)):
new_shape = (new_shape,)
a = ravel(a)
new_size = 1
for dim_length in new_shape:
new_size *= dim_length
if dim_length < 0:
raise ValueError('all elements of `new_shape` must be non-negative')
if a.size == 0 or new_size == 0:
# First case must zero fill. The second would have repeats == 0.
return np.zeros_like(a, shape=new_shape)
repeats = -(-new_size // a.size) # ceil division
a = concatenate((a,) * repeats)[:new_size]
return reshape(a, new_shape)
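# Small check (not part of the original module) of the implementation above: the
# flattened data is repeated ceil(new_size / a.size) times and truncated, so the
# values cycle in C order. `_demo_resize_cycles` is illustrative only.
def _demo_resize_cycles():
    a = np.array([0, 1, 2])
    assert np.array_equal(np.resize(a, 7), np.array([0, 1, 2, 0, 1, 2, 0]))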
def _squeeze_dispatcher(a, axis=None):
return (a,)
@array_function_dispatch(_squeeze_dispatcher)
def squeeze(a, axis=None):
"""
Remove axes of length one from `a`.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
.. versionadded:: 1.7.0
Selects a subset of the entries of length one in the
shape. If an axis is selected with shape entry greater than
one, an error is raised.
Returns
-------
squeezed : ndarray
The input array, but with all or a subset of the
dimensions of length 1 removed. This is always `a` itself
or a view into `a`. Note that if all axes are squeezed,
the result is a 0d array and not a scalar.
Raises
------
ValueError
If `axis` is not None, and an axis being squeezed is not of length 1
See Also
--------
expand_dims : The inverse operation, adding entries of length one
reshape : Insert, remove, and combine dimensions, and resize existing ones
Examples
--------
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> np.squeeze(x).shape
(3,)
>>> np.squeeze(x, axis=0).shape
(3, 1)
>>> np.squeeze(x, axis=1).shape
Traceback (most recent call last):
...
ValueError: cannot select an axis to squeeze out which has size not equal to one
>>> np.squeeze(x, axis=2).shape
(1, 3)
>>> x = np.array([[1234]])
>>> x.shape
(1, 1)
>>> np.squeeze(x)
array(1234) # 0d array
>>> np.squeeze(x).shape
()
>>> np.squeeze(x)[()]
1234
"""
try:
squeeze = a.squeeze
except AttributeError:
return _wrapit(a, 'squeeze', axis=axis)
if axis is None:
return squeeze()
else:
return squeeze(axis=axis)
def _diagonal_dispatcher(a, offset=None, axis1=None, axis2=None):
return (a,)
@array_function_dispatch(_diagonal_dispatcher)
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
Return specified diagonals.
If `a` is 2-D, returns the diagonal of `a` with the given offset,
i.e., the collection of elements of the form ``a[i, i+offset]``. If
`a` has more than two dimensions, then the axes specified by `axis1`
and `axis2` are used to determine the 2-D sub-array whose diagonal is
returned. The shape of the resulting array can be determined by
removing `axis1` and `axis2` and appending an index to the right equal
to the size of the resulting diagonals.
In versions of NumPy prior to 1.7, this function always returned a new,
independent array containing a copy of the values in the diagonal.
In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal,
but depending on this fact is deprecated. Writing to the resulting
array continues to work as it used to, but a FutureWarning is issued.
Starting in NumPy 1.9 it returns a read-only view on the original array.
Attempting to write to the resulting array will produce an error.
In some future release, it will return a read/write view and writing to
the returned array will alter your original array. The returned array
will have the same type as the input array.
If you don't write to the array returned by this function, then you can
just ignore all of the above.
If you depend on the current behavior, then we suggest copying the
returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead
of just ``np.diagonal(a)``. This will work with both past and future
versions of NumPy.
Parameters
----------
a : array_like
Array from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be positive or
negative. Defaults to main diagonal (0).
axis1 : int, optional
Axis to be used as the first axis of the 2-D sub-arrays from which
the diagonals should be taken. Defaults to first axis (0).
axis2 : int, optional
Axis to be used as the second axis of the 2-D sub-arrays from
which the diagonals should be taken. Defaults to second axis (1).
Returns
-------
array_of_diagonals : ndarray
If `a` is 2-D, then a 1-D array containing the diagonal and of the
same type as `a` is returned unless `a` is a `matrix`, in which case
a 1-D array rather than a (2-D) `matrix` is returned in order to
maintain backward compatibility.
If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2`
are removed, and a new axis inserted at the end corresponding to the
diagonal.
Raises
------
ValueError
If the dimension of `a` is less than 2.
See Also
--------
diag : MATLAB work-a-like for 1-D and 2-D arrays.
diagflat : Create diagonal arrays.
trace : Sum along diagonals.
Examples
--------
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> a.diagonal()
array([0, 3])
>>> a.diagonal(1)
array([1])
A 3-D example:
>>> a = np.arange(8).reshape(2,2,2); a
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> a.diagonal(0, # Main diagonals of two arrays created by skipping
... 0, # across the outer(left)-most axis last and
... 1) # the "middle" (row) axis first.
array([[0, 6],
[1, 7]])
The sub-arrays whose main diagonals we just obtained; note that each
corresponds to fixing the right-most (column) axis, and that the
diagonals are "packed" in rows.
>>> a[:,:,0] # main diagonal is [0 6]
array([[0, 2],
[4, 6]])
>>> a[:,:,1] # main diagonal is [1 7]
array([[1, 3],
[5, 7]])
The anti-diagonal can be obtained by reversing the order of elements
using either `numpy.flipud` or `numpy.fliplr`.
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.fliplr(a).diagonal() # Horizontal flip
array([2, 4, 6])
>>> np.flipud(a).diagonal() # Vertical flip
array([6, 4, 2])
Note that the order in which the diagonal is retrieved varies depending
on the flip function.
"""
if isinstance(a, np.matrix):
# Make diagonal of matrix 1-D to preserve backward compatibility.
return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
else:
return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
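# Hedged check (not part of the original module) of the Notes above: since
# NumPy 1.9 the returned diagonal is a read-only view of the input.
# `_demo_diagonal_is_readonly_view` is illustrative only.
def _demo_diagonal_is_readonly_view():
    a = np.arange(4).reshape(2, 2)
    d = np.diagonal(a)
    assert d.flags.writeable is False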
def _trace_dispatcher(
a, offset=None, axis1=None, axis2=None, dtype=None, out=None):
return (a, out)
@array_function_dispatch(_trace_dispatcher)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : array_like
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
dtype : dtype, optional
Determines the data-type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and `a` is
of integer type of precision less than the default integer
precision, then the default integer precision is used. Otherwise,
the precision is the same as that of `a`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and
it must be of the right shape to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
See Also
--------
diag, diagonal, diagflat
Examples
--------
>>> np.trace(np.eye(3))
3.0
>>> a = np.arange(8).reshape((2,2,2))
>>> np.trace(a)
array([6, 8])
>>> a = np.arange(24).reshape((2,2,2,3))
>>> np.trace(a).shape
(2, 3)
"""
if isinstance(a, np.matrix):
# Get trace of matrix via an array to preserve backward compatibility.
return asarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
else:
return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
def _ravel_dispatcher(a, order=None):
return (a,)
@array_function_dispatch(_ravel_dispatcher)
def ravel(a, order='C'):
"""Return a contiguous flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
As of NumPy 1.10, the returned array will have the same type as the input
array. (for example, a masked array will be returned for a masked array
input)
Parameters
----------
a : array_like
Input array. The elements in `a` are read in the order specified by
`order`, and packed as a 1-D array.
order : {'C','F', 'A', 'K'}, optional
The elements of `a` are read using this index order. 'C' means
to index the elements in row-major, C-style order,
with the last axis index changing fastest, back to the first
axis index changing slowest. 'F' means to index the elements
in column-major, Fortran-style order, with the
first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of
the memory layout of the underlying array, and only refer to
the order of axis indexing. 'A' means to read the elements in
Fortran-like index order if `a` is Fortran *contiguous* in
memory, C-like order otherwise. 'K' means to read the
elements in the order they occur in memory, except for
reversing the data when strides are negative. By default, 'C'
index order is used.
Returns
-------
y : array_like
y is an array of the same subtype as `a`, with shape ``(a.size,)``.
Note that matrices are special cased for backward compatibility, if `a`
is a matrix, then y is a 1-D ndarray.
See Also
--------
ndarray.flat : 1-D iterator over an array.
ndarray.flatten : 1-D array copy of the elements of an array
in row-major order.
ndarray.reshape : Change the shape of an array without changing its data.
Notes
-----
In row-major, C-style order, in two dimensions, the row index
varies the slowest, and the column index the quickest. This can
be generalized to multiple dimensions, where row-major order
implies that the index along the first axis varies slowest, and
the index along the last quickest. The opposite holds for
column-major, Fortran-style index ordering.
When a view is desired in as many cases as possible, ``arr.reshape(-1)``
may be preferable.
Examples
--------
It is equivalent to ``reshape(-1, order=order)``.
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.ravel(x)
array([1, 2, 3, 4, 5, 6])
>>> x.reshape(-1)
array([1, 2, 3, 4, 5, 6])
>>> np.ravel(x, order='F')
array([1, 4, 2, 5, 3, 6])
When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering:
>>> np.ravel(x.T)
array([1, 4, 2, 5, 3, 6])
>>> np.ravel(x.T, order='A')
array([1, 2, 3, 4, 5, 6])
When ``order`` is 'K', it will preserve orderings that are neither 'C'
nor 'F', but won't reverse axes:
>>> a = np.arange(3)[::-1]; a
array([2, 1, 0])
>>> a.ravel(order='C')
array([2, 1, 0])
>>> a.ravel(order='K')
array([2, 1, 0])
>>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a
array([[[ 0, 2, 4],
[ 1, 3, 5]],
[[ 6, 8, 10],
[ 7, 9, 11]]])
>>> a.ravel(order='C')
array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11])
>>> a.ravel(order='K')
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
"""
if isinstance(a, np.matrix):
return asarray(a).ravel(order=order)
else:
return asanyarray(a).ravel(order=order)
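# Small sketch (not part of the original module) contrasting ravel with
# ndarray.flatten, as noted above: for a C-contiguous input ravel returns a
# view, while flatten always copies. `_demo_ravel_vs_flatten` is illustrative only.
def _demo_ravel_vs_flatten():
    x = np.arange(6).reshape(2, 3)
    r = np.ravel(x)
    r[0] = 99
    assert x[0, 0] == 99       # ravel gave a view
    f = x.flatten()
    f[0] = -1
    assert x[0, 0] == 99       # flatten's copy leaves x untouched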
def _nonzero_dispatcher(a):
return (a,)
@array_function_dispatch(_nonzero_dispatcher)
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`,
containing the indices of the non-zero elements in that
dimension. The values in `a` are always tested and returned in
row-major, C-style order.
To group the indices by element, rather than dimension, use `argwhere`,
which returns a row for each non-zero element.
.. note::
When called on a zero-d array or scalar, ``nonzero(a)`` is treated
as ``nonzero(atleast_1d(a))``.
.. deprecated:: 1.17.0
Use `atleast_1d` explicitly if this behavior is deliberate.
Parameters
----------
a : array_like
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Notes
-----
While the nonzero values can be obtained with ``a[nonzero(a)]``, it is
recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which
will correctly handle 0-d arrays.
Examples
--------
>>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
>>> x
array([[3, 0, 0],
[0, 4, 0],
[5, 6, 0]])
>>> np.nonzero(x)
(array([0, 1, 2, 2]), array([0, 1, 0, 1]))
>>> x[np.nonzero(x)]
array([3, 4, 5, 6])
>>> np.transpose(np.nonzero(x))
array([[0, 0],
[1, 1],
[2, 0],
[2, 1]])
A common use for ``nonzero`` is to find the indices of an array where
a condition is True. Given an array `a`, the condition ``a > 3`` is a
boolean array and, since False is interpreted as 0, ``np.nonzero(a > 3)``
yields the indices of `a` where the condition is true.
>>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]])
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
Using this result to index `a` is equivalent to using the mask directly:
>>> a[np.nonzero(a > 3)]
array([4, 5, 6, 7, 8, 9])
>>> a[a > 3] # prefer this spelling
array([4, 5, 6, 7, 8, 9])
``nonzero`` can also be called as a method of the array.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return _wrapfunc(a, 'nonzero')
def _shape_dispatcher(a):
return (a,)
@array_function_dispatch(_shape_dispatcher)
def shape(a):
"""
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
len
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 2]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
>>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
>>> np.shape(a)
(2,)
>>> a.shape
(2,)
"""
try:
result = a.shape
except AttributeError:
result = asarray(a).shape
return result
def _compress_dispatcher(condition, a, axis=None, out=None):
return (condition, a, out)
@array_function_dispatch(_compress_dispatcher)
def compress(condition, a, axis=None, out=None):
"""
Return selected slices of an array along given axis.
When working along a given axis, a slice along that axis is returned in
`output` for each index where `condition` evaluates to True. When
working on a 1-D array, `compress` is equivalent to `extract`.
Parameters
----------
condition : 1-D array of bools
Array that selects which entries to return. If len(condition)
is less than the size of `a` along the given axis, then output is
truncated to the length of the condition array.
a : array_like
Array from which to extract a part.
axis : int, optional
Axis along which to take slices. If None (default), work on the
flattened array.
out : ndarray, optional
Output array. Its type is preserved and it must be of the right
shape to hold the output.
Returns
-------
compressed_array : ndarray
A copy of `a` without the slices along axis for which `condition`
is false.
See Also
--------
take, choose, diag, diagonal, select
ndarray.compress : Equivalent method in ndarray
extract : Equivalent method when working on 1-D arrays
:ref:`ufuncs-output-type`
Examples
--------
>>> a = np.array([[1, 2], [3, 4], [5, 6]])
>>> a
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.compress([0, 1], a, axis=0)
array([[3, 4]])
>>> np.compress([False, True, True], a, axis=0)
array([[3, 4],
[5, 6]])
>>> np.compress([False, True], a, axis=1)
array([[2],
[4],
[6]])
Working on the flattened array does not return slices along an axis but
selects elements.
>>> np.compress([False, True], a)
array([2])
"""
return _wrapfunc(a, 'compress', condition, axis=axis, out=out)
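# A minimal illustration (assuming ``import numpy as np``) of the 1-D
# equivalence between ``compress`` and ``extract`` noted in the docstring
# above; both simply pick the entries where the condition is True:
#
#     >>> np.compress([False, True], [10, 20])
#     array([20])
#     >>> np.extract([False, True], [10, 20])
#     array([20])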
def _clip_dispatcher(a, a_min, a_max, out=None, **kwargs):
return (a, a_min, a_max)
@array_function_dispatch(_clip_dispatcher)
def clip(a, a_min, a_max, out=None, **kwargs):
"""
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Equivalent to but faster than ``np.minimum(a_max, np.maximum(a, a_min))``.
No check is performed to ensure ``a_min < a_max``.
Parameters
----------
a : array_like
Array containing elements to clip.
a_min, a_max : array_like or None
Minimum and maximum value. If ``None``, clipping is not performed on
the corresponding edge. Only one of `a_min` and `a_max` may be
``None``. Both are broadcast against `a`.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
.. versionadded:: 1.17.0
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
See Also
--------
:ref:`ufuncs-output-type`
Notes
-----
When `a_min` is greater than `a_max`, `clip` returns an
array in which all values are equal to `a_max`,
as shown in the second example.
Examples
--------
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, 1, 8)
array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
>>> np.clip(a, 8, 1)
array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
>>> np.clip(a, 3, 6, out=a)
array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
>>> a
array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, [3, 4, 1, 1, 1, 4, 4, 4, 4, 4], 8)
array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
"""
return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs)
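# A small supplementary sketch (assuming ``import numpy as np``) of the
# one-sided clipping mentioned in the Parameters section, where one of
# ``a_min``/``a_max`` is None and that edge is left unclipped:
#
#     >>> a = np.arange(10)
#     >>> np.clip(a, None, 7)   # clip only from above
#     array([0, 1, 2, 3, 4, 5, 6, 7, 7, 7])
#     >>> np.clip(a, 3, None)   # clip only from below
#     array([3, 3, 3, 3, 4, 5, 6, 7, 8, 9])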
def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
initial=None, where=None):
return (a, out)
@array_function_dispatch(_sum_dispatcher)
def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
initial=np._NoValue, where=np._NoValue):
"""
Sum of array elements over a given axis.
Parameters
----------
a : array_like
Elements to sum.
axis : None or int or tuple of ints, optional
Axis or axes along which a sum is performed. The default,
axis=None, will sum all of the elements of the input array. If
axis is negative it counts from the last to the first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a sum is performed on all of the axes
specified in the tuple instead of a single axis or all the axes as
before.
dtype : dtype, optional
The type of the returned array and of the accumulator in which the
elements are summed. The dtype of `a` is used by default unless `a`
has an integer dtype of less precision than the default platform
integer. In that case, if `a` is signed then the platform integer
is used while if `a` is unsigned then an unsigned integer of the
same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `sum` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
Starting value for the sum. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to include in the sum. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.17.0
Returns
-------
sum_along_axis : ndarray
An array with the same shape as `a`, with the specified
axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
See Also
--------
ndarray.sum : Equivalent method.
add.reduce : Equivalent functionality of `add`.
cumsum : Cumulative sum of array elements.
trapz : Integration of array values using the composite trapezoidal rule.
mean, average
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
The sum of an empty array is the neutral element 0:
>>> np.sum([])
0.0
For floating point numbers the numerical precision of sum (and
``np.add.reduce``) is in general limited by directly adding each number
individually to the result causing rounding errors in every step.
However, often numpy will use a numerically better approach (partial
pairwise summation) leading to improved precision in many use-cases.
This improved precision is always provided when no ``axis`` is given.
When ``axis`` is given, it will depend on which axis is summed.
Technically, to provide the best speed possible, the improved precision
is only used when the summation is along the fast axis in memory.
Note that the exact precision may vary depending on other parameters.
In contrast to NumPy, Python's ``math.fsum`` function uses a slower but
more precise approach to summation.
Especially when summing a large number of lower precision floating point
numbers, such as ``float32``, numerical errors can become significant.
In such cases it can be advisable to use `dtype="float64"` to use a higher
precision for the output.
Examples
--------
>>> np.sum([0.5, 1.5])
2.0
>>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
1
>>> np.sum([[0, 1], [0, 5]])
6
>>> np.sum([[0, 1], [0, 5]], axis=0)
array([0, 6])
>>> np.sum([[0, 1], [0, 5]], axis=1)
array([1, 5])
>>> np.sum([[0, 1], [np.nan, 5]], where=[False, True], axis=1)
array([1., 5.])
If the accumulator is too small, overflow occurs:
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
-128
You can also start the sum with a value other than zero:
>>> np.sum([10], initial=5)
15
"""
if isinstance(a, _gentype):
# 2018-02-25, 1.15.0
warnings.warn(
"Calling np.sum(generator) is deprecated, and in the future will give a different result. "
"Use np.sum(np.fromiter(generator)) or the python sum builtin instead.",
DeprecationWarning, stacklevel=3)
res = _sum_(a)
if out is not None:
out[...] = res
return out
return res
return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims,
initial=initial, where=where)
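# A minimal sketch of the precision discussion above (assuming
# ``import numpy as np`` and ``import math``); the exact float32 result
# varies with platform and memory layout, so no outputs are shown:
#
#     x = np.full(1000000, 0.1, dtype=np.float32)
#     x.sum()                   # float32 accumulator, pairwise summation
#     x.sum(dtype=np.float64)   # higher-precision accumulator
#     math.fsum(x)              # slower, exactly rounded reference (Python)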
def _any_dispatcher(a, axis=None, out=None, keepdims=None, *,
where=np._NoValue):
return (a, where, out)
@array_function_dispatch(_any_dispatcher)
def any(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue):
"""
Test whether any array element along a given axis evaluates to True.
Returns a single boolean unless `axis` is not ``None``.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical OR reduction is performed.
The default (``axis=None``) is to perform a logical OR over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved
(e.g., if it is of type float, then it will remain so, returning
1.0 for True and 0.0 for False, regardless of the type of `a`).
See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `any` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in checking for any `True` values.
See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.20.0
Returns
-------
any : bool or ndarray
A new boolean or `ndarray` is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.any : equivalent method
all : Test whether all elements along a given axis evaluate to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity evaluate
to `True` because these are not equal to zero.
Examples
--------
>>> np.any([[True, False], [True, True]])
True
>>> np.any([[True, False], [False, False]], axis=0)
array([ True, False])
>>> np.any([-1, 0, 5])
True
>>> np.any(np.nan)
True
>>> np.any([[True, False], [False, False]], where=[[False], [True]])
False
>>> o=np.array(False)
>>> z=np.any([-1, 4, 5], out=o)
>>> z, o
(array(True), array(True))
>>> # Check now that z is a reference to o
>>> z is o
True
>>> id(z), id(o) # identity of z and o # doctest: +SKIP
(191614240, 191614240)
"""
return _wrapreduction(a, np.logical_or, 'any', axis, None, out,
keepdims=keepdims, where=where)
def _all_dispatcher(a, axis=None, out=None, keepdims=None, *,
where=None):
return (a, where, out)
@array_function_dispatch(_all_dispatcher)
def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue):
"""
Test whether all array elements along a given axis evaluate to True.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical AND reduction is performed.
The default (``axis=None``) is to perform a logical AND over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result.
It must have the same shape as the expected output and its
type is preserved (e.g., if ``dtype(out)`` is float, the result
will consist of 0.0's and 1.0's). See :ref:`ufuncs-output-type` for more
details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `all` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in checking for all `True` values.
See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.20.0
Returns
-------
all : ndarray, bool
A new boolean or array is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.all : equivalent method
any : Test whether any element along a given axis evaluates to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to `True` because these are not equal to zero.
Examples
--------
>>> np.all([[True,False],[True,True]])
False
>>> np.all([[True,False],[True,True]], axis=0)
array([ True, False])
>>> np.all([-1, 4, 5])
True
>>> np.all([1.0, np.nan])
True
>>> np.all([[True, True], [False, True]], where=[[True], [False]])
True
>>> o=np.array(False)
>>> z=np.all([-1, 4, 5], out=o)
>>> id(z), id(o), z
(28293632, 28293632, array(True)) # may vary
"""
return _wrapreduction(a, np.logical_and, 'all', axis, None, out,
keepdims=keepdims, where=where)
def _cumsum_dispatcher(a, axis=None, dtype=None, out=None):
return (a, out)
@array_function_dispatch(_cumsum_dispatcher)
def cumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See :ref:`ufuncs-output-type` for
more details.
Returns
-------
cumsum_along_axis : ndarray.
A new array holding the result is returned unless `out` is
specified, in which case a reference to `out` is returned. The
result has the same size as `a`, and the same shape as `a` if
`axis` is not None or `a` is a 1-d array.
See Also
--------
sum : Sum array elements.
trapz : Integration of array values using the composite trapezoidal rule.
diff : Calculate the n-th discrete difference along given axis.
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
``cumsum(a)[-1]`` may not be equal to ``sum(a)`` for floating-point
values since ``sum`` may use a pairwise summation routine, reducing
the roundoff-error. See `sum` for more information.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.cumsum(a)
array([ 1, 3, 6, 10, 15, 21])
>>> np.cumsum(a, dtype=float) # specifies type of output value(s)
array([ 1., 3., 6., 10., 15., 21.])
>>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
array([[1, 2, 3],
[5, 7, 9]])
>>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
array([[ 1, 3, 6],
[ 4, 9, 15]])
``cumsum(b)[-1]`` may not be equal to ``sum(b)``
>>> b = np.array([1, 2e-9, 3e-9] * 1000000)
>>> b.cumsum()[-1]
1000000.0050045159
>>> b.sum()
1000000.0050000029
"""
return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out)
def _ptp_dispatcher(a, axis=None, out=None, keepdims=None):
return (a, out)
@array_function_dispatch(_ptp_dispatcher)
def ptp(a, axis=None, out=None, keepdims=np._NoValue):
"""
Range of values (maximum - minimum) along an axis.
The name of the function comes from the acronym for 'peak to peak'.
.. warning::
`ptp` preserves the data type of the array. This means the
return value for an input of signed integers with n bits
(e.g. `np.int8`, `np.int16`, etc) is also a signed integer
with n bits. In that case, peak-to-peak values greater than
``2**(n-1)-1`` will be returned as negative values. An example
with a work-around is shown below.
Parameters
----------
a : array_like
Input values.
axis : None or int or tuple of ints, optional
Axis along which to find the peaks. By default, flatten the
array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.15.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : array_like
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type of the output values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `ptp` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
ptp : ndarray
A new array holding the result, unless `out` was
specified, in which case a reference to `out` is returned.
Examples
--------
>>> x = np.array([[4, 9, 2, 10],
... [6, 9, 7, 12]])
>>> np.ptp(x, axis=1)
array([8, 6])
>>> np.ptp(x, axis=0)
array([2, 0, 5, 2])
>>> np.ptp(x)
10
This example shows that a negative value can be returned when
the input is an array of signed integers.
>>> y = np.array([[1, 127],
... [0, 127],
... [-1, 127],
... [-2, 127]], dtype=np.int8)
>>> np.ptp(y, axis=1)
array([ 126, 127, -128, -127], dtype=int8)
A work-around is to use the `view()` method to view the result as
unsigned integers with the same bit width:
>>> np.ptp(y, axis=1).view(np.uint8)
array([126, 127, 128, 129], dtype=uint8)
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
ptp = a.ptp
except AttributeError:
pass
else:
return ptp(axis=axis, out=out, **kwargs)
return _methods._ptp(a, axis=axis, out=out, **kwargs)
def _amax_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
where=None):
return (a, out)
@array_function_dispatch(_amax_dispatcher)
def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
where=np._NoValue):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which to operate. By default, flattened input is
used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the maximum is selected over multiple axes,
instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `amax` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
The minimum value of an output element. Must be present to allow
computation on empty slice. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to compare for the maximum. See `~numpy.ufunc.reduce`
for details.
.. versionadded:: 1.17.0
Returns
-------
amax : ndarray or scalar
Maximum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amin :
The minimum value of an array along a given axis, propagating any NaNs.
nanmax :
The maximum value of an array along a given axis, ignoring any NaNs.
maximum :
Element-wise maximum of two arrays, propagating any NaNs.
fmax :
Element-wise maximum of two arrays, ignoring any NaNs.
argmax :
Return the indices of the maximum values.
nanmin, minimum, fmin
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding max value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmax.
Don't use `amax` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
``amax(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amax(a) # Maximum of the flattened array
3
>>> np.amax(a, axis=0) # Maxima along the first axis
array([2, 3])
>>> np.amax(a, axis=1) # Maxima along the second axis
array([1, 3])
>>> np.amax(a, where=[False, True], initial=-1, axis=0)
array([-1, 3])
>>> b = np.arange(5, dtype=float)
>>> b[2] = np.NaN
>>> np.amax(b)
nan
>>> np.amax(b, where=~np.isnan(b), initial=-1)
4.0
>>> np.nanmax(b)
4.0
You can use an initial value to compute the maximum of an empty slice, or
to initialize it to a different value:
>>> np.max([[-50], [10]], axis=-1, initial=0)
array([ 0, 10])
Notice that the initial value is used as one of the elements for which the
maximum is determined, unlike the ``default`` argument of Python's ``max``
function, which is only used for empty iterables.
>>> np.max([5], initial=6)
6
>>> max([5], default=6)
5
"""
return _wrapreduction(a, np.maximum, 'max', axis, None, out,
keepdims=keepdims, initial=initial, where=where)
def _amin_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
where=None):
return (a, out)
@array_function_dispatch(_amin_dispatcher)
def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
where=np._NoValue):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which to operate. By default, flattened input is
used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the minimum is selected over multiple axes,
instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `amin` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
The maximum value of an output element. Must be present to allow
computation on empty slice. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to compare for the minimum. See `~numpy.ufunc.reduce`
for details.
.. versionadded:: 1.17.0
Returns
-------
amin : ndarray or scalar
Minimum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amax :
The maximum value of an array along a given axis, propagating any NaNs.
nanmin :
The minimum value of an array along a given axis, ignoring any NaNs.
minimum :
Element-wise minimum of two arrays, propagating any NaNs.
fmin :
Element-wise minimum of two arrays, ignoring any NaNs.
argmin :
Return the indices of the minimum values.
nanmax, maximum, fmax
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding min value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmin.
Don't use `amin` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
``amin(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amin(a) # Minimum of the flattened array
0
>>> np.amin(a, axis=0) # Minima along the first axis
array([0, 1])
>>> np.amin(a, axis=1) # Minima along the second axis
array([0, 2])
>>> np.amin(a, where=[False, True], initial=10, axis=0)
array([10, 1])
>>> b = np.arange(5, dtype=float)
>>> b[2] = np.NaN
>>> np.amin(b)
nan
>>> np.amin(b, where=~np.isnan(b), initial=10)
0.0
>>> np.nanmin(b)
0.0
>>> np.min([[-50], [10]], axis=-1, initial=0)
array([-50, 0])
Notice that the initial value is used as one of the elements for which the
minimum is determined, unlike the ``default`` argument of Python's ``min``
function, which is only used for empty iterables. Notice that this isn't
the same as Python's ``default`` argument.
>>> np.min([6], initial=5)
5
>>> min([6], default=5)
6
"""
return _wrapreduction(a, np.minimum, 'min', axis, None, out,
keepdims=keepdims, initial=initial, where=where)
def _alen_dispatcher(a):
    return (a,)
@array_function_dispatch(_alen_dispatcher)
def alen(a):
"""
Return the length of the first dimension of the input array.
.. deprecated:: 1.18
`numpy.alen` is deprecated, use `len` instead.
Parameters
----------
a : array_like
Input array.
Returns
-------
alen : int
Length of the first dimension of `a`.
See Also
--------
shape, size
Examples
--------
>>> a = np.zeros((7,4,5))
>>> a.shape[0]
7
>>> np.alen(a)
7
"""
# NumPy 1.18.0, 2019-08-02
warnings.warn(
"`np.alen` is deprecated, use `len` instead",
DeprecationWarning, stacklevel=2)
try:
return len(a)
except TypeError:
return len(array(a, ndmin=1))
def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
initial=None, where=None):
return (a, out)
@array_function_dispatch(_prod_dispatcher)
def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
initial=np._NoValue, where=np._NoValue):
"""
Return the product of array elements over a given axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which a product is performed. The default,
axis=None, will calculate the product of all the elements in the
input array. If axis is negative it counts from the last to the
first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a product is performed on all of the
axes specified in the tuple instead of a single axis or all the
axes as before.
dtype : dtype, optional
The type of the returned array, as well as of the accumulator in
which the elements are multiplied. The dtype of `a` is used by
default unless `a` has an integer dtype of less precision than the
default platform integer. In that case, if `a` is signed then the
platform integer is used while if `a` is unsigned then an unsigned
integer of the same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `prod` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
The starting value for this product. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.15.0
where : array_like of bool, optional
Elements to include in the product. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.17.0
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
An array shaped as `a` but with the specified axis removed.
Returns a reference to `out` if specified.
See Also
--------
ndarray.prod : equivalent method
:ref:`ufuncs-output-type`
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow. That means that, on a 32-bit platform:
>>> x = np.array([536870910, 536870910, 536870910, 536870910])
>>> np.prod(x)
16 # may vary
The product of an empty array is the neutral element 1:
>>> np.prod([])
1.0
Examples
--------
By default, calculate the product of all elements:
>>> np.prod([1.,2.])
2.0
Even when the input array is two-dimensional:
>>> np.prod([[1.,2.],[3.,4.]])
24.0
But we can also specify the axis over which to multiply:
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
Or select specific elements to include:
>>> np.prod([1., np.nan, 3.], where=[True, False, True])
3.0
If the type of `x` is unsigned, then the output type is
the unsigned platform integer:
>>> x = np.array([1, 2, 3], dtype=np.uint8)
>>> np.prod(x).dtype == np.uint
True
If `x` is of a signed integer type, then the output type
is the default platform integer:
>>> x = np.array([1, 2, 3], dtype=np.int8)
>>> np.prod(x).dtype == int
True
You can also start the product with a value other than one:
>>> np.prod([1, 2], initial=5)
10
"""
return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out,
keepdims=keepdims, initial=initial, where=where)
def _cumprod_dispatcher(a, axis=None, dtype=None, out=None):
return (a, out)
@array_function_dispatch(_cumprod_dispatcher)
def cumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative product is computed. By default
the input is flattened.
dtype : dtype, optional
Type of the returned array, as well as of the accumulator in which
the elements are multiplied. If *dtype* is not specified, it
defaults to the dtype of `a`, unless `a` has an integer dtype with
a precision less than that of the default platform integer. In
that case, the default platform integer is used instead.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type of the resulting values will be cast if necessary.
Returns
-------
cumprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case a reference to out is returned.
See Also
--------
:ref:`ufuncs-output-type`
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([1,2,3])
>>> np.cumprod(a) # intermediate results 1, 1*2
... # total product 1*2*3 = 6
array([1, 2, 6])
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.cumprod(a, dtype=float) # specify type of output
array([ 1., 2., 6., 24., 120., 720.])
The cumulative product for each column (i.e., over the rows) of `a`:
>>> np.cumprod(a, axis=0)
array([[ 1, 2, 3],
[ 4, 10, 18]])
The cumulative product for each row (i.e. over the columns) of `a`:
>>> np.cumprod(a,axis=1)
array([[ 1, 2, 6],
[ 4, 20, 120]])
"""
return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
def _ndim_dispatcher(a):
return (a,)
@array_function_dispatch(_ndim_dispatcher)
def ndim(a):
"""
Return the number of dimensions of an array.
Parameters
----------
a : array_like
Input array. If it is not already an ndarray, a conversion is
attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in `a`. Scalars are zero-dimensional.
See Also
--------
ndarray.ndim : equivalent method
shape : dimensions of array
ndarray.shape : dimensions of array
Examples
--------
>>> np.ndim([[1,2,3],[4,5,6]])
2
>>> np.ndim(np.array([[1,2,3],[4,5,6]]))
2
>>> np.ndim(1)
0
"""
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def _size_dispatcher(a, axis=None):
return (a,)
@array_function_dispatch(_size_dispatcher)
def size(a, axis=None):
"""
Return the number of elements along a given axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which the elements are counted. By default, give
the total number of elements.
Returns
-------
element_count : int
Number of elements along the specified axis.
See Also
--------
shape : dimensions of array
ndarray.shape : dimensions of array
ndarray.size : number of elements in array
Examples
--------
>>> a = np.array([[1,2,3],[4,5,6]])
>>> np.size(a)
6
>>> np.size(a,1)
3
>>> np.size(a,0)
2
"""
if axis is None:
try:
return a.size
except AttributeError:
return asarray(a).size
else:
try:
return a.shape[axis]
except AttributeError:
return asarray(a).shape[axis]
def _around_dispatcher(a, decimals=None, out=None):
return (a, out)
@array_function_dispatch(_around_dispatcher)
def around(a, decimals=0, out=None):
"""
Evenly round to the given number of decimals.
Parameters
----------
a : array_like
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary. See :ref:`ufuncs-output-type` for more
details.
Returns
-------
rounded_array : ndarray
An array of the same type as `a`, containing the rounded values.
Unless `out` was specified, a new array is created. A reference to
the result is returned.
The real and imaginary parts of complex numbers are rounded
separately. The result of rounding a float is a float.
See Also
--------
ndarray.round : equivalent method
ceil, fix, floor, rint, trunc
Notes
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc.
``np.around`` uses a fast but sometimes inexact algorithm to round
floating-point datatypes. For positive `decimals` it is equivalent to
``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which has
error due to the inexact representation of decimal fractions in the IEEE
floating point standard [1]_ and errors introduced when scaling by powers
of ten. For instance, note the extra "1" in the following:
>>> np.round(56294995342131.5, 3)
56294995342131.51
If your goal is to print such values with a fixed number of decimals, it is
preferable to use numpy's float printing routines to limit the number of
printed decimals:
>>> np.format_float_positional(56294995342131.5, precision=3)
'56294995342131.5'
The float printing routines use an accurate but much more computationally
demanding algorithm to compute the number of digits after the decimal
point.
Alternatively, Python's builtin `round` function uses a more accurate
but slower algorithm for 64-bit floating point values:
>>> round(56294995342131.5, 3)
56294995342131.5
>>> np.round(16.055, 2), round(16.055, 2) # equals 16.0549999999999997
(16.06, 16.05)
References
----------
.. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
https://people.eecs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
.. [2] "How Futile are Mindless Assessments of
Roundoff in Floating-Point Computation?", William Kahan,
https://people.eecs.berkeley.edu/~wkahan/Mindless.pdf
Examples
--------
>>> np.around([0.37, 1.64])
array([0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([0., 2., 2., 4., 4.])
>>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1,2,3,11], decimals=-1)
array([ 0, 0, 0, 10])
"""
return _wrapfunc(a, 'round', decimals=decimals, out=out)
def _mean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None, *,
where=None):
return (a, where, out)
@array_function_dispatch(_mean_dispatcher)
def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *,
where=np._NoValue):
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken over
the flattened array by default, otherwise over the specified axis.
`float64` intermediate and return values are used for integer inputs.
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a mean is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the
input dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
See :ref:`ufuncs-output-type` for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `mean` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in the mean. See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.20.0
Returns
-------
m : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
See Also
--------
average : Weighted average
std, var, nanmean, nanstd, nanvar
Notes
-----
The arithmetic mean is the sum of the elements along the axis divided
by the number of elements.
Note that for floating-point input, the mean is computed using the
same precision the input has. Depending on the input data, this can
cause the results to be inaccurate, especially for `float32` (see
example below). Specifying a higher-precision accumulator using the
`dtype` keyword can alleviate this issue.
By default, `float16` results are computed using `float32` intermediates
for extra precision.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
2.5
>>> np.mean(a, axis=0)
array([2., 3.])
>>> np.mean(a, axis=1)
array([1.5, 3.5])
In single precision, `mean` can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.mean(a)
0.54999924
Computing the mean in float64 is more accurate:
>>> np.mean(a, dtype=np.float64)
0.55000000074505806 # may vary
Specifying a where argument:
>>> a = np.array([[5, 9, 13], [14, 10, 12], [11, 15, 19]])
>>> np.mean(a)
12.0
>>> np.mean(a, where=[[True], [False], [False]])
9.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if where is not np._NoValue:
kwargs['where'] = where
if type(a) is not mu.ndarray:
try:
mean = a.mean
except AttributeError:
pass
else:
return mean(axis=axis, dtype=dtype, out=out, **kwargs)
return _methods._mean(a, axis=axis, dtype=dtype,
out=out, **kwargs)
def _std_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
keepdims=None, *, where=None):
return (a, where, out)
@array_function_dispatch(_std_dispatcher)
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
where=np._NoValue):
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : array_like
Calculate the standard deviation of these values.
axis : None or int or tuple of ints, optional
Axis or axes along which the standard deviation is computed. The
default is to compute the standard deviation of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a standard deviation is performed over
multiple axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is
the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `std` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in the standard deviation.
See `~numpy.ufunc.reduce` for details.
.. versionadded:: 1.20.0
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
See Also
--------
var, mean, nanmean, nanstd, nanvar
:ref:`ufuncs-output-type`
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean, i.e., ``std = sqrt(mean(x))``, where
``x = abs(a - a.mean())**2``.
The average squared deviation is typically calculated as ``x.sum() / N``,
where ``N = len(x)``. If, however, `ddof` is specified, the divisor
``N - ddof`` is used instead. In standard statistical practice, ``ddof=1``
provides an unbiased estimator of the variance of the infinite population.
``ddof=0`` provides a maximum likelihood estimate of the variance for
normally distributed variables. The standard deviation computed in this
function is the square root of the estimated variance, so even with
``ddof=1``, it will not be an unbiased estimate of the standard deviation
per se.
Note that, for complex numbers, `std` takes the absolute
value before squaring, so that the result is always real and nonnegative.
For floating-point input, the *std* is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949 # may vary
>>> np.std(a, axis=0)
array([1., 1.])
>>> np.std(a, axis=1)
array([0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.std(a)
0.45000005
Computing the standard deviation in float64 is more accurate:
>>> np.std(a, dtype=np.float64)
0.44999999925494177 # may vary
Specifying a where argument:
>>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
>>> np.std(a)
2.614064523559687 # may vary
>>> np.std(a, where=[[True], [True], [False]])
2.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if where is not np._NoValue:
kwargs['where'] = where
if type(a) is not mu.ndarray:
try:
std = a.std
except AttributeError:
pass
else:
return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)
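# A minimal sketch of the ``ddof`` behaviour described in the Notes above
# (assuming ``import numpy as np``): with ``ddof=0`` the divisor is N, with
# ``ddof=1`` it is N - 1 (the usual sample estimate).
#
#     >>> a = np.array([1.0, 2.0, 3.0, 4.0])
#     >>> np.std(a)           # sqrt(5/4), about 1.118
#     >>> np.std(a, ddof=1)   # sqrt(5/3), about 1.291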
def _var_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
keepdims=None, *, where=None):
return (a, where, out)
@array_function_dispatch(_var_dispatcher)
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *,
where=np._NoValue):
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the variance is computed. The default is to
compute the variance of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a variance is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
the default is `float64`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `var` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
where : array_like of bool, optional
Elements to include in the variance. See `~numpy.ufunc.reduce` for
details.
.. versionadded:: 1.20.0
Returns
-------
variance : ndarray, see dtype parameter above
If ``out=None``, returns a new array containing the variance;
otherwise, a reference to the output array is returned.
See Also
--------
std, mean, nanmean, nanstd, nanvar
:ref:`ufuncs-output-type`
Notes
-----
The variance is the average of the squared deviations from the mean,
i.e., ``var = mean(x)``, where ``x = abs(a - a.mean())**2``.
The mean is typically calculated as ``x.sum() / N``, where ``N = len(x)``.
If, however, `ddof` is specified, the divisor ``N - ddof`` is used
instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of a hypothetical infinite population.
``ddof=0`` provides a maximum likelihood estimate of the variance for
normally distributed variables.
Note that for complex numbers, the absolute value is taken before
squaring, so that the result is always real and nonnegative.
For floating-point input, the variance is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for `float32` (see example
below). Specifying a higher-accuracy accumulator using the ``dtype``
keyword can alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.var(a)
1.25
>>> np.var(a, axis=0)
array([1., 1.])
>>> np.var(a, axis=1)
array([0.25, 0.25])
In single precision, var() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.var(a)
0.20250003
Computing the variance in float64 is more accurate:
>>> np.var(a, dtype=np.float64)
0.20249999932944759 # may vary
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.2025
Specifying a where argument:
>>> a = np.array([[14, 8, 11, 10], [7, 9, 10, 11], [10, 15, 5, 10]])
>>> np.var(a)
6.833333333333333 # may vary
>>> np.var(a, where=[[True], [True], [False]])
4.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if where is not np._NoValue:
kwargs['where'] = where
if type(a) is not mu.ndarray:
try:
var = a.var
except AttributeError:
pass
else:
return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)
# Aliases of other functions. These have their own definitions only so that
# they can have unique docstrings.
@array_function_dispatch(_around_dispatcher)
def round_(a, decimals=0, out=None):
"""
Round an array to the given number of decimals.
See Also
--------
around : equivalent function; see for details.
"""
return around(a, decimals=decimals, out=out)
@array_function_dispatch(_prod_dispatcher, verify=False)
def product(*args, **kwargs):
"""
Return the product of array elements over a given axis.
See Also
--------
prod : equivalent function; see for details.
"""
return prod(*args, **kwargs)
@array_function_dispatch(_cumprod_dispatcher, verify=False)
def cumproduct(*args, **kwargs):
"""
Return the cumulative product over the given axis.
See Also
--------
cumprod : equivalent function; see for details.
"""
return cumprod(*args, **kwargs)
@array_function_dispatch(_any_dispatcher, verify=False)
def sometrue(*args, **kwargs):
"""
Check whether some values are true.
Refer to `any` for full documentation.
See Also
--------
any : equivalent function; see for details.
"""
return any(*args, **kwargs)
@array_function_dispatch(_all_dispatcher, verify=False)
def alltrue(*args, **kwargs):
"""
Check if all elements of input array are true.
See Also
--------
numpy.all : Equivalent function; see for details.
"""
return all(*args, **kwargs)
| {
"content_hash": "3d3d4593bfde2f78fa82d79c5650389f",
"timestamp": "",
"source": "github",
"line_count": 3789,
"max_line_length": 103,
"avg_line_length": 32.40353655318026,
"alnum_prop": 0.6027594744944086,
"repo_name": "ryfeus/lambda-packs",
"id": "65a42eb1ee72c7843efbf8a2cf8f8c47d0b17644",
"size": "122777",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ONNX/lambda-onnx/numpy/core/fromnumeric.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
"""
<comment-ja>
Generates the virtual network configuration for libvirt.
</comment-ja>
<comment-en>
Generate configuration file of virtual networks for libvirt.
</comment-en>
@file: config_network.py
@author: Taizo ITO <[email protected]>
@copyright:
"""
import time
import os, stat
import re
import errno
from StringIO import StringIO
from xml.dom.minidom import DOMImplementation
implementation = DOMImplementation()
import karesansui
from karesansui.lib.const import KARESANSUI_GROUP, \
VIRT_NETWORK_CONFIG_DIR
from karesansui.lib.utils import get_xml_parse as XMLParse
from karesansui.lib.utils import get_xml_xpath as XMLXpath
from karesansui.lib.utils import r_chgrp, r_chmod
from karesansui.lib.networkaddress import NetworkAddress
from karesansui.lib.file.configfile import ConfigFile
class KaresansuiNetworkConfigParamException(karesansui.KaresansuiLibException):
pass
class NetworkConfigParam:
def __init__(self, arg):
if isinstance(arg, basestring):
# expect name as string
self.name = arg
self.uuid = None
self.bridge = None
self.forward_dev = None
self.forward_mode = None
self.ipaddr = None
self.netmask = None
self.dhcp_start = None
self.dhcp_end = None
self.bridge_stp = None
self.bridge_forwardDelay = None
else:
# expect dict in KaresansuiVirtNetwork#get_info() format
self.name = arg['name']
self.uuid = arg['uuid']
self.bridge = arg['bridge']['name']
self.forward_dev = arg['forward']['dev']
self.forward_mode = arg['forward']['mode']
self.ipaddr = arg['ip']['address']
self.netmask = arg['ip']['netmask']
self.dhcp_start = arg['dhcp']['start']
self.dhcp_end = arg['dhcp']['end']
try:
self.bridge_stp = arg['bridge']['stp']
except:
self.bridge_stp = None
try:
self.bridge_forwardDelay = arg['bridge']['forwardDelay']
except:
self.bridge_forwardDelay = None
def get_network_name(self):
return self.name
def set_uuid(self, uuid):
self.uuid = uuid
def get_uuid(self):
return self.uuid
def set_bridge(self, bridge):
"""
@param bridge: name of the bridge
@type bridge: string
@return nothing
"""
if bridge is not None:
self.bridge = str(bridge)
def get_bridge(self):
return self.bridge
def set_forward_dev(self, device):
if device is not None:
self.forward_dev = str(device)
def get_forward_dev(self):
return self.forward_dev
def set_forward_mode(self, mode='nat'):
if mode is not None:
self.forward_mode = str(mode)
def get_forward_mode(self):
return self.forward_mode
def set_default_networks(self, addr, dhcp_start=None, dhcp_end=None):
self.set_netmask(NetworkAddress(addr).get('netmask'))
self.set_ipaddr(NetworkAddress(addr).get('first_ip'))
if not dhcp_start:
dhcp_start = NetworkAddress(addr).get('first_ip')
if not dhcp_end:
dhcp_end = NetworkAddress(addr).get('last_ip')
self.set_dhcp_start(dhcp_start)
self.set_dhcp_end(dhcp_end)
def get_networks(self):
return {"ipaddr": self.ipaddr, "netmask":self.netmask,
"dhcp_start": self.dhcp_start, "dhcp_stop":self.dhcp_stop}
def set_ipaddr(self, addr):
if addr is not None:
self.ipaddr = str(addr)
def get_ipaddr(self):
return self.ipaddr
def set_netmask(self, addr):
if addr is not None:
self.netmask = str(addr)
def get_netmask(self):
return self.netmask
def set_ipaddr_and_netmask(self, addr):
"""
Set ip address and netmask from '192.168.0.1/24' or '192.168.0.1/255.255.255.0' styled strings.
@param addr: Strings like '192.168.0.1/24' or '192.168.0.1/255.255.255.0'.
@type addr: string
@return: nothing
"""
na = NetworkAddress(addr)
self.set_ipaddr(na.get('ipaddr'))
self.set_netmask(na.get('netmask'))
def set_dhcp_start(self, addr):
if addr is not None:
self.dhcp_start = str(addr)
def get_dhcp_start(self):
return self.dhcp_start
def set_dhcp_end(self, addr):
if addr is not None:
self.dhcp_end = str(addr)
def get_dhcp_end(self):
return self.dhcp_end
def set_bridge_stp(self, stp='on'):
if stp is not None:
self.bridge_stp = str(stp)
def get_bridge_stp(self):
return self.bridge_stp
def set_bridge_forwardDelay(self, forwardDelay):
if forwardDelay is not None:
self.bridge_forwardDelay = str(forwardDelay)
def get_bridge_forwardDelay(self):
return self.bridge_forwardDelay
def load_xml_config(self,path):
if not os.path.exists(path):
raise KaresansuiNetworkConfigParamException("no such file: %s" % path)
document = XMLParse(path)
uuid = XMLXpath(document,'/network/uuid/text()')
self.set_uuid(str(uuid))
bridge = XMLXpath(document,'/network/bridge/@name')
self.set_bridge(bridge)
forward_dev = XMLXpath(document,'/network/forward/@dev')
if forward_dev:
self.set_forward_dev(forward_dev)
forward_mode = XMLXpath(document,'/network/forward/@mode')
if forward_mode:
self.set_forward_mode(forward_mode)
ipaddr = XMLXpath(document,'/network/ip/@address')
self.set_ipaddr(ipaddr)
netmask = XMLXpath(document,'/network/ip/@netmask')
self.set_netmask(netmask)
dhcp_start = XMLXpath(document,'/network/ip/dhcp/range/@start')
self.set_dhcp_start(dhcp_start)
dhcp_end = XMLXpath(document,'/network/ip/dhcp/range/@end')
self.set_dhcp_end(dhcp_end)
bridge_stp = XMLXpath(document,'/network/bridge/@stp')
self.set_bridge_stp(bridge_stp)
bridge_forwardDelay = XMLXpath(document,'/network/bridge/@forwardDelay')
self.set_bridge_forwardDelay(bridge_forwardDelay)
def validate(self):
if not self.uuid:
raise KaresansuiNetworkConfigParamException("ConfigParam: uuid is None")
if not self.name or not len(self.name):
raise KaresansuiNetworkConfigParamException("ConfigParam: illegal name")
class NetworkXMLGenerator:
def _create_text_node(self, tag, txt):
node = self.document.createElement(tag)
self._add_text(node, txt)
return node
def _add_text(self, node, txt):
txt_n = self.document.createTextNode(txt)
node.appendChild(txt_n)
def generate(self, config):
tree = self.generate_xml_tree(config)
out = StringIO()
out.write(tree.toxml())
return out.getvalue()
class NetworkXMLConfigGenerator(NetworkXMLGenerator):
def __init__(self):
self.config_dir = VIRT_NETWORK_CONFIG_DIR
def generate_xml_tree(self, config):
config.validate()
self.config = config
self.begin_build()
self.build_bridge()
self.build_forward()
self.build_ip()
self.end_build()
return self.document
def begin_build(self):
self.document = implementation.createDocument(None,None,None)
self.network = self.document.createElement("network")
name = self._create_text_node("name", self.config.get_network_name())
uuid = self._create_text_node("uuid", self.config.get_uuid())
self.network.appendChild(name)
self.network.appendChild(uuid)
self.document.appendChild(self.network)
def build_bridge(self):
doc = self.document
if self.config.get_bridge():
bridge = doc.createElement("bridge")
bridge.setAttribute("name", self.config.get_bridge())
if self.config.get_bridge_stp() is not None:
bridge.setAttribute("stp", self.config.get_bridge_stp())
else:
bridge.setAttribute("stp", "on")
if self.config.get_bridge_forwardDelay() is not None:
bridge.setAttribute("forwardDelay", self.config.get_bridge_forwardDelay())
else:
bridge.setAttribute("forwardDelay", "0")
self.network.appendChild(bridge)
def build_forward(self):
doc = self.document
if self.config.get_forward_dev() is not None or \
self.config.get_forward_mode() is not None:
forward = doc.createElement("forward")
if self.config.get_forward_dev() is not None:
forward.setAttribute("dev", self.config.get_forward_dev())
if self.config.get_forward_mode() is not None:
forward.setAttribute("mode", self.config.get_forward_mode())
self.network.appendChild(forward)
def build_ip(self):
doc = self.document
ip = doc.createElement("ip")
ip.setAttribute("netmask", self.config.get_netmask())
ip.setAttribute("address", self.config.get_ipaddr())
self.network.appendChild(ip)
dhcp = doc.createElement("dhcp")
range = doc.createElement("range")
range.setAttribute("start", self.config.get_dhcp_start())
range.setAttribute("end", self.config.get_dhcp_end())
dhcp.appendChild(range)
ip.appendChild(dhcp)
def end_build(self):
pass
def writecfg(self,cfg):
try:
os.makedirs(self.config_dir)
except OSError, (err, msg):
if err != errno.EEXIST:
raise OSError(err,msg)
filename = "%s/%s.xml" %(self.config_dir,self.config.get_network_name())
ConfigFile(filename).write(cfg)
r_chmod(filename,"o-rwx")
r_chmod(filename,"g+rw")
if os.getuid() == 0:
r_chgrp(filename,KARESANSUI_GROUP)
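# --- Hedged usage sketch (illustration only, not part of Karesansui) ---------
# Drives NetworkXMLConfigGenerator with a minimal stand-in object that
# implements just the accessors generate_xml_tree() calls; in real use a
# Karesansui network config-param instance populated above would be passed.
if __name__ == '__main__':
    class _StubNetworkConfig:
        def validate(self): pass
        def get_network_name(self): return "example-net"
        def get_uuid(self): return "00000000-0000-0000-0000-000000000000"
        def get_bridge(self): return "virbr10"
        def get_bridge_stp(self): return "on"
        def get_bridge_forwardDelay(self): return "0"
        def get_forward_dev(self): return None
        def get_forward_mode(self): return "nat"
        def get_ipaddr(self): return "192.168.100.1"
        def get_netmask(self): return "255.255.255.0"
        def get_dhcp_start(self): return "192.168.100.10"
        def get_dhcp_end(self): return "192.168.100.200"
    # Prints the serialized <network> XML built by the generator above.
    print(NetworkXMLConfigGenerator().generate(_StubNetworkConfig()))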
| {
"content_hash": "c1959fb9758fbeca179f9345d2f21aaa",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 103,
"avg_line_length": 32.27301587301587,
"alnum_prop": 0.6053511705685619,
"repo_name": "karesansui/karesansui",
"id": "3dc48f591945baa6f3f37b4808544aff1264036e",
"size": "11386",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "karesansui/lib/virt/config_network.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "79865"
},
{
"name": "HTML",
"bytes": "32774"
},
{
"name": "JavaScript",
"bytes": "286445"
},
{
"name": "Makefile",
"bytes": "265"
},
{
"name": "Python",
"bytes": "2226164"
},
{
"name": "Shell",
"bytes": "18293"
}
],
"symlink_target": ""
} |
import smtplib
def main():
hostname = 'smtp.gmail.com'
port = 587
username = '[email protected]'
password = 'testpassword'
sender = '[email protected]'
receivers = ['[email protected]']
message = '''From: J.L. Young <[email protected]>
To: JLY Receiver <[email protected]>
Subject: Sending e-mail from gmail
This appears to work.
Cheers!
'''
try:
server = smtplib.SMTP(hostname, port)
server.set_debuglevel(1)
server.ehlo()
server.starttls()
server.login(username,password)
response = server.sendmail(sender, receivers, message)
server.quit()
print response
except Exception as e:
print e
if __name__ == '__main__':
main() | {
"content_hash": "a7f6f7ac90a458fcf2b94b7b514ba192",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 65,
"avg_line_length": 24.06451612903226,
"alnum_prop": 0.7184986595174263,
"repo_name": "jlyoung/stackoverflow_answers",
"id": "e263f97803698c50429122f4bbe2eccc0d04cc68",
"size": "746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smtplib_send_mail_from_gmail/sendfromgmailsanitized.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8838"
}
],
"symlink_target": ""
} |
"""
Russell RGB Colourspace
=======================
Defines the *Russell RGB* colourspace:
- :attr:`colour.models.RGB_COLOURSPACE_RUSSELL_RGB`.
References
----------
- :cite:`Cottrella` : Cottrell, R. (n.d.). The Russell RGB working color
space. http://www.russellcottrell.com/photo/downloads/RussellRGB.icc
"""
from __future__ import annotations
import numpy as np
from functools import partial
from colour.colorimetry.datasets import CCS_ILLUMINANTS
from colour.hints import NDArray
from colour.models.rgb import (
RGB_Colourspace,
gamma_function,
normalised_primary_matrix,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = [
"PRIMARIES_RUSSELL_RGB",
"WHITEPOINT_NAME_RUSSELL_RGB",
"CCS_WHITEPOINT_RUSSELL_RGB",
"MATRIX_RUSSELL_RGB_TO_XYZ",
"MATRIX_XYZ_TO_RUSSELL_RGB",
"RGB_COLOURSPACE_RUSSELL_RGB",
]
PRIMARIES_RUSSELL_RGB: NDArray = np.array(
[
[0.6900, 0.3100],
[0.1800, 0.7700],
[0.1000, 0.0200],
]
)
"""*Russell RGB* colourspace primaries."""
WHITEPOINT_NAME_RUSSELL_RGB: str = "D55"
"""*Russell RGB* colourspace whitepoint name."""
CCS_WHITEPOINT_RUSSELL_RGB: NDArray = CCS_ILLUMINANTS[
"CIE 1931 2 Degree Standard Observer"
][WHITEPOINT_NAME_RUSSELL_RGB]
"""*Russell RGB* colourspace whitepoint chromaticity coordinates."""
MATRIX_RUSSELL_RGB_TO_XYZ: NDArray = normalised_primary_matrix(
PRIMARIES_RUSSELL_RGB, CCS_WHITEPOINT_RUSSELL_RGB
)
"""*Russell RGB* colourspace to *CIE XYZ* tristimulus values matrix."""
MATRIX_XYZ_TO_RUSSELL_RGB: NDArray = np.linalg.inv(MATRIX_RUSSELL_RGB_TO_XYZ)
"""*CIE XYZ* tristimulus values to *Russell RGB* colourspace matrix."""
RGB_COLOURSPACE_RUSSELL_RGB: RGB_Colourspace = RGB_Colourspace(
"Russell RGB",
PRIMARIES_RUSSELL_RGB,
CCS_WHITEPOINT_RUSSELL_RGB,
WHITEPOINT_NAME_RUSSELL_RGB,
MATRIX_RUSSELL_RGB_TO_XYZ,
MATRIX_XYZ_TO_RUSSELL_RGB,
partial(gamma_function, exponent=1 / 2.2),
partial(gamma_function, exponent=2.2),
)
RGB_COLOURSPACE_RUSSELL_RGB.__doc__ = """
*Russell RGB* colourspace.
References
----------
:cite:`Cottrella`
"""
| {
"content_hash": "ce91ea3ec6223086cac0855fd1211995",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 78,
"avg_line_length": 27.68235294117647,
"alnum_prop": 0.6914577135571611,
"repo_name": "colour-science/colour",
"id": "b9c1769bbb587e1efc632ae25c265ed6cb21d81e",
"size": "2353",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "colour/models/rgb/datasets/russell_rgb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7967270"
},
{
"name": "TeX",
"bytes": "163213"
},
{
"name": "Visual Basic 6.0",
"bytes": "1170"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from mock import MagicMock
from src.main.com.rowley.shavekeeper.productdatacompiler.models.ProductConsolidator import ProductConsolidator
from src.main.com.rowley.shavekeeper.productdatacompiler.models.ProductModels import ItemBase
from src.main.com.rowley.shavekeeper.productdatacompiler.models.ProductModels import Soap
from src.main.com.rowley.shavekeeper.productdatacompiler.models.ProductModels import PreShave
from src.main.com.rowley.shavekeeper.productdatacompiler.models.ProductModels import Brush
from src.main.com.rowley.shavekeeper.productdatacompiler.models.ProductModels import Razor
from src.main.com.rowley.shavekeeper.productdatacompiler.models.ProductModels import Blade
from src.main.com.rowley.shavekeeper.productdatacompiler.models.ProductModels import PostShave
from src.main.com.rowley.shavekeeper.productdatacompiler.models.ProductModels import AfterShave
class TestProductConsolidator(TestCase):
def test_add_item_to_map(self):
product_consolidator = ProductConsolidator()
brand = "I'm a brand"
model = "I'm a model"
item = ItemBase(brand, model)
product_consolidator.__add_item_to_map__(item, product_consolidator.pre_shaves)
self.assertTrue(item.brand in product_consolidator.pre_shaves)
self.assertTrue(item.model in product_consolidator.pre_shaves[item.brand])
self.assertEqual(item, product_consolidator.pre_shaves[item.brand][item.model])
def test_add_item_to_map_only_adds_one(self):
product_consolidator = ProductConsolidator()
brand = "I'm a brand"
model = "I'm a model"
item = ItemBase(brand, model)
product_consolidator.__add_item_to_map__(item, product_consolidator.pre_shaves)
item_two = ItemBase(brand, model)
product_consolidator.__add_item_to_map__(item_two, product_consolidator.pre_shaves)
self.assertTrue(item.brand in product_consolidator.pre_shaves)
self.assertTrue(item.model in product_consolidator.pre_shaves[item.brand])
self.assertEqual(item, product_consolidator.pre_shaves[item.brand][item.model])
def set_up_mock(self):
product_consolidator = ProductConsolidator()
product_consolidator.__add_item_to_map__ = MagicMock(return_value=True)
return product_consolidator
def test_add_pre_shave(self):
product_consolidator = self.set_up_mock()
pre_shave = PreShave
product_consolidator.add_pre_shave(pre_shave)
product_consolidator.__add_item_to_map__.assert_called_with(pre_shave, product_consolidator.pre_shaves)
def test_add_soap(self):
product_consolidator = self.set_up_mock()
soap = Soap
product_consolidator.add_soap(soap)
product_consolidator.__add_item_to_map__.assert_called_with(soap, product_consolidator.soaps)
def test_add_brush(self):
product_consolidator = self.set_up_mock()
brush = Brush
product_consolidator.add_brush(brush)
product_consolidator.__add_item_to_map__.assert_called_with(brush, product_consolidator.brushes)
def test_add_razor(self):
product_consolidator = self.set_up_mock()
razor = Razor
product_consolidator.add_razor(razor)
product_consolidator.__add_item_to_map__.assert_called_with(razor, product_consolidator.razors)
def test_add_blade(self):
product_consolidator = self.set_up_mock()
blade = Blade
product_consolidator.add_blade(blade)
product_consolidator.__add_item_to_map__.assert_called_with(blade, product_consolidator.blades)
def test_add_post_shave(self):
product_consolidator = self.set_up_mock()
post_shave = PostShave
product_consolidator.add_post_shave(post_shave)
product_consolidator.__add_item_to_map__.assert_called_with(post_shave, product_consolidator.post_shaves)
def test_add_after_shave(self):
product_consolidator = self.set_up_mock()
after_shave = AfterShave
product_consolidator.add_after_shave(after_shave)
product_consolidator.__add_item_to_map__.assert_called_with(after_shave, product_consolidator.after_shaves)
def create_test_product_consolidator(self):
product_consolidator = ProductConsolidator()
pre_shave_1 = PreShave("Preshave 1 brand", "Preshave 1 model")
product_consolidator.add_pre_shave(pre_shave_1)
pre_shave_2 = PreShave("Preshave 2 brand", "Preshave 2 model")
product_consolidator.add_pre_shave(pre_shave_2)
pre_shave_3 = PreShave("Preshave 2 brand", "Preshave 3 model")
product_consolidator.add_pre_shave(pre_shave_3)
soap_1 = Soap("Soap 1 brand", "Soap 1 model")
product_consolidator.add_soap(soap_1)
soap_2 = Soap("Soap 1 brand", "Soap 2 model")
product_consolidator.add_soap(soap_2)
soap_3 = Soap("Soap 1 brand", "Soap 3 model")
product_consolidator.add_soap(soap_3)
brush = Brush("brush brand", "brush model")
product_consolidator.add_brush(brush)
razor_1 = Razor("Razor 1 brand", "Razor 1 model", "DE", True, False)
product_consolidator.add_razor(razor_1)
razor_2 = Razor("Razor 2 brand", "Razor 2 model", "Straight Blade", False, False)
product_consolidator.add_razor(razor_2)
blade_1 = Blade("Blade 1 brand", "Blade 1 model")
product_consolidator.add_blade(blade_1)
blade_2 = Blade("Blade 1 brand", "Blade 2 model")
product_consolidator.add_blade(blade_2)
blade_3 = Blade("Blade 3 brand", "Blade 3 model")
product_consolidator.add_blade(blade_3)
blade_4 = Blade("Blade 4 brand", "Blade 4 model")
product_consolidator.add_blade(blade_4)
blade_5 = Blade("Blade 1 brand", "Blade 5 model")
product_consolidator.add_blade(blade_5)
post_shave_1 = PostShave("Post Shave 1 brand", "Post Shave 1 model")
product_consolidator.add_post_shave(post_shave_1)
post_shave_2 = PostShave("Post Shave 2 brand", "Post Shave 2 model")
product_consolidator.add_post_shave(post_shave_2)
after_shave_1 = AfterShave("AfterShave 1 brand", "AfterShave 1 model")
product_consolidator.add_after_shave(after_shave_1)
return product_consolidator
product_consolidator_json_string = '{"after_shaves":{"AfterShave 1 brand":{"AfterShave 1 model":{"brand":"' \
'AfterShave 1 brand","model":"AfterShave 1 model"}}},"blades":{"Blade 1 brand"' \
':{"Blade 1 model":{"brand":"Blade 1 brand","model":"Blade 1 model"},"Blade 2' \
' model":{"brand":"Blade 1 brand","model":"Blade 2 model"},"Blade 5 model":{' \
'"brand":"Blade 1 brand","model":"Blade 5 model"}},"Blade 3 brand":{"Blade 3' \
' model":{"brand":"Blade 3 brand","model":"Blade 3 model"}},"Blade 4 brand":{' \
'"Blade 4 model":{"brand":"Blade 4 brand","model":"Blade 4 model"}}},"brushes"' \
':{"brush brand":{"brush model":{"brand":"brush brand",' \
'"model":"brush model"}}},"post_shaves":{"Post Shave 1 ' \
'brand":{"Post Shave 1 model":{"brand":"Post Shave 1 brand","model":"Post ' \
'Shave 1 model"}},"Post Shave 2 brand":{"Post Shave 2 model":{"brand":"Post ' \
'Shave 2 brand","model":"Post Shave 2 model"}}},"pre_shaves":{"Preshave 1 ' \
'brand":{"Preshave 1 model":{"brand":"Preshave 1 brand","model":"Preshave 1 ' \
'model"}},"Preshave 2 brand":{"Preshave 2 model":{"brand":"Preshave 2 brand",' \
'"model":"Preshave 2 model"},"Preshave 3 model":{"brand":"Preshave 2 brand",' \
'"model":"Preshave 3 model"}}},"razors":{"Razor 1 brand":{"Razor 1 model":{' \
'"brand":"Razor 1 brand","is_adjustable":false,"model":"Razor 1 model","' \
'razor_type":"DE","uses_blade":true}},"Razor 2 brand":{"Razor 2 model":{' \
'"brand":"Razor 2 brand","is_adjustable":false,"model":"Razor 2 model",' \
'"razor_type":"Straight Blade","uses_blade":false}}},"soaps":{"Soap 1 brand":' \
'{"Soap 1 model":{"brand":"Soap 1 brand","model":"Soap 1 model"},"Soap 2 ' \
'model":{"brand":"Soap 1 brand","model":"Soap 2 model"},"Soap 3 model":{' \
'"brand":"Soap 1 brand","model":"Soap 3 model"}}}}'
def test_eq_same_instance(self):
product_consolidator = ProductConsolidator()
self.assertEqual(product_consolidator, product_consolidator)
def test_eq_different_types(self):
self.assertNotEqual(ProductConsolidator(), Brush())
def test_eq_equivalent_instances(self):
compiler1 = self.create_test_product_consolidator()
compiler2 = self.create_test_product_consolidator()
self.assertEqual(compiler1, compiler2)
def test_eq_non_equivalent_instances(self):
compiler1 = self.create_test_product_consolidator()
compiler2 = self.create_test_product_consolidator()
compiler2.pre_shaves = {}
self.assertNotEqual(compiler1, compiler2)
def test_to_JSON(self):
product_consolidator = self.create_test_product_consolidator()
json = product_consolidator.to_json()
self.assertEquals(json, self.product_consolidator_json_string)
def test_from_JSON(self):
product_consolidator = ProductConsolidator.from_json(self.product_consolidator_json_string)
reference_consolidator = self.create_test_product_consolidator()
self.assertEquals(product_consolidator, reference_consolidator)
| {
"content_hash": "6a707546917031697bb9c404d12e5698",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 120,
"avg_line_length": 54.738219895287955,
"alnum_prop": 0.6287900526064084,
"repo_name": "alphonzo79/ShaveKeeper",
"id": "e0d7202bfad1134831b25fbedcd368b44cd4b233",
"size": "10455",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ProductDataCompiler/src/test/com/rowley/shavekeeper/productdatacompiler/models/test_productConsolidator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "93696"
}
],
"symlink_target": ""
} |
"""
Create the list of programs that will be executed.
The result is a list of tuples of the following format :
(function_id, schedule_id)
Example :
(function524, function524_schedule_125)
"""
import pickle
from pathlib import Path
# Path to the directory containing the programs
data_path = Path("/data/scratch/henni-mohammed/data3/programs/")
# Path to where to store the list of programs
dst_path = Path("progs_list.pickle")
progs_list = []
for func_path in data_path.iterdir():
# We discard programs that have no schedule.
    # We don't need to execute those programs: their speedup is 1 by
    # definition, since they have no scheduled variants to compare against.
    # If you want them in the dataset, just include them with speedup = 1.
if len(list(func_path.iterdir())) <= 2:
continue
for sched_path in func_path.iterdir():
if not sched_path.is_dir():
continue
func_id = func_path.parts[-1]
sched_id = sched_path.parts[-1]
progs_list.append((func_id, sched_id))
with open(dst_path, "wb") as f:
pickle.dump(progs_list, f)
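# --- Hedged sketch of the consumer side (illustration only) ------------------
# Reloads the pickle written above and prints the first few
# (function_id, schedule_id) pairs described in the module docstring.
if __name__ == "__main__":
    with open(dst_path, "rb") as f:
        progs = pickle.load(f)
    for func_id, sched_id in progs[:5]:
        print(func_id, sched_id)  # e.g. function524 function524_schedule_125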
| {
"content_hash": "f5fd4de664095ad1228669ad037c37cd",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 79,
"avg_line_length": 28.15,
"alnum_prop": 0.6580817051509769,
"repo_name": "rbaghdadi/COLi",
"id": "f63910d04efeb98ecaf13b504bec3e1931355e7e",
"size": "1126",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utils/code_generator/time_measurement/create_progs_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "15030"
},
{
"name": "C++",
"bytes": "380296"
},
{
"name": "Makefile",
"bytes": "6535"
}
],
"symlink_target": ""
} |
"""Higher level child and data watching API's.
:Maintainer: Ben Bangert <[email protected]>
:Status: Production
.. note::
:ref:`DataWatch` and :ref:`ChildrenWatch` may only handle a single
function, attempts to associate a single instance with multiple functions
will result in an exception being thrown.
"""
import logging
import time
import warnings
from functools import partial, wraps
from kazoo.retry import KazooRetry
from kazoo.exceptions import (
ConnectionClosedError,
NoNodeError,
KazooException
)
from kazoo.protocol.states import KazooState
log = logging.getLogger(__name__)
_STOP_WATCHING = object()
def _ignore_closed(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except ConnectionClosedError:
pass
return wrapper
class DataWatch(object):
"""Watches a node for data updates and calls the specified
function each time it changes
    The function will also be called the very first time it is
registered to get the data.
Returning `False` from the registered function will disable future
data change calls. If the client connection is closed (using the
close command), the DataWatch will no longer get updates.
If the function supplied takes three arguments, then the third one
will be a :class:`~kazoo.protocol.states.WatchedEvent`. It will
only be set if the change to the data occurs as a result of the
server notifying the watch that there has been a change. Events
like reconnection or the first call will not include an event.
If the node does not exist, then the function will be called with
``None`` for all values.
.. tip::
Because :class:`DataWatch` can watch nodes that don't exist, it
can be used alternatively as a higher-level Exists watcher that
survives reconnections and session loss.
Example with client:
.. code-block:: python
@client.DataWatch('/path/to/watch')
def my_func(data, stat):
print("Data is %s" % data)
print("Version is %s" % stat.version)
# Above function is called immediately and prints
# Or if you want the event object
@client.DataWatch('/path/to/watch')
def my_func(data, stat, event):
print("Data is %s" % data)
print("Version is %s" % stat.version)
print("Event is %s" % event)
.. versionchanged:: 1.2
DataWatch now ignores additional arguments that were previously
passed to it and warns that they are no longer respected.
"""
def __init__(self, client, path, func=None, *args, **kwargs):
"""Create a data watcher for a path
:param client: A zookeeper client.
:type client: :class:`~kazoo.client.KazooClient`
:param path: The path to watch for data changes on.
:type path: str
:param func: Function to call initially and every time the
node changes. `func` will be called with a
tuple, the value of the node and a
:class:`~kazoo.client.ZnodeStat` instance.
:type func: callable
"""
self._client = client
self._path = path
self._func = func
self._stopped = False
self._run_lock = client.handler.lock_object()
self._version = None
self._retry = KazooRetry(max_tries=None,
sleep_func=client.handler.sleep_func)
self._include_event = None
self._ever_called = False
self._used = False
if args or kwargs:
warnings.warn('Passing additional arguments to DataWatch is'
' deprecated. ignore_missing_node is now assumed '
' to be True by default, and the event will be '
' sent if the function can handle receiving it',
DeprecationWarning, stacklevel=2)
# Register our session listener if we're going to resume
# across session losses
if func is not None:
self._used = True
self._client.add_listener(self._session_watcher)
self._get_data()
def __call__(self, func):
"""Callable version for use as a decorator
:param func: Function to call initially and every time the
data changes. `func` will be called with a
tuple, the value of the node and a
:class:`~kazoo.client.ZnodeStat` instance.
:type func: callable
"""
if self._used:
raise KazooException(
"A function has already been associated with this "
"DataWatch instance.")
self._func = func
self._used = True
self._client.add_listener(self._session_watcher)
self._get_data()
return func
def _log_func_exception(self, data, stat, event=None):
try:
            # For backwards compatibility, pass the event to the callback
            # only when the callback accepts a third argument.
if not self._ever_called:
self._ever_called = True
try:
result = self._func(data, stat, event)
except TypeError:
result = self._func(data, stat)
if result is False:
self._stopped = True
self._client.remove_listener(self._session_watcher)
except Exception as exc:
log.exception(exc)
raise
@_ignore_closed
def _get_data(self, event=None):
# Ensure this runs one at a time, possible because the session
# watcher may trigger a run
with self._run_lock:
if self._stopped:
return
initial_version = self._version
try:
data, stat = self._retry(self._client.get,
self._path, self._watcher)
except NoNodeError:
data = None
# This will set 'stat' to None if the node does not yet
# exist.
stat = self._retry(self._client.exists, self._path,
self._watcher)
if stat:
self._client.handler.spawn(self._get_data)
return
# No node data, clear out version
if stat is None:
self._version = None
else:
self._version = stat.mzxid
# Call our function if its the first time ever, or if the
# version has changed
if initial_version != self._version or not self._ever_called:
self._log_func_exception(data, stat, event)
def _watcher(self, event):
self._get_data(event=event)
def _set_watch(self, state):
with self._run_lock:
self._watch_established = state
def _session_watcher(self, state):
if state == KazooState.CONNECTED:
self._client.handler.spawn(self._get_data)
class ChildrenWatch(object):
"""Watches a node for children updates and calls the specified
function each time it changes
    The function will also be called the very first time it is
registered to get children.
Returning `False` from the registered function will disable future
children change calls. If the client connection is closed (using
the close command), the ChildrenWatch will no longer get updates.
if send_event=True in __init__, then the function will always be
called with second parameter, ``event``. Upon initial call or when
recovering a lost session the ``event`` is always ``None``.
Otherwise it's a :class:`~kazoo.prototype.state.WatchedEvent`
instance.
Example with client:
.. code-block:: python
@client.ChildrenWatch('/path/to/watch')
def my_func(children):
print "Children are %s" % children
# Above function is called immediately and prints children
"""
def __init__(self, client, path, func=None,
allow_session_lost=True, send_event=False):
"""Create a children watcher for a path
:param client: A zookeeper client.
:type client: :class:`~kazoo.client.KazooClient`
:param path: The path to watch for children on.
:type path: str
:param func: Function to call initially and every time the
children change. `func` will be called with a
single argument, the list of children.
:type func: callable
:param allow_session_lost: Whether the watch should be
re-registered if the zookeeper
session is lost.
:type allow_session_lost: bool
:type send_event: bool
:param send_event: Whether the function should be passed the
event sent by ZooKeeper or None upon
initialization (see class documentation)
The path must already exist for the children watcher to
run.
"""
self._client = client
self._path = path
self._func = func
self._send_event = send_event
self._stopped = False
self._watch_established = False
self._allow_session_lost = allow_session_lost
self._run_lock = client.handler.lock_object()
self._prior_children = None
self._used = False
# Register our session listener if we're going to resume
# across session losses
if func is not None:
self._used = True
if allow_session_lost:
self._client.add_listener(self._session_watcher)
self._get_children()
def __call__(self, func):
"""Callable version for use as a decorator
:param func: Function to call initially and every time the
children change. `func` will be called with a
single argument, the list of children.
:type func: callable
"""
if self._used:
raise KazooException(
"A function has already been associated with this "
"ChildrenWatch instance.")
self._func = func
self._used = True
if self._allow_session_lost:
self._client.add_listener(self._session_watcher)
self._get_children()
return func
@_ignore_closed
def _get_children(self, event=None):
with self._run_lock: # Ensure this runs one at a time
if self._stopped:
return
children = self._client.retry(self._client.get_children,
self._path, self._watcher)
if not self._watch_established:
self._watch_established = True
if self._prior_children is not None and \
self._prior_children == children:
return
self._prior_children = children
try:
if self._send_event:
result = self._func(children, event)
else:
result = self._func(children)
if result is False:
self._stopped = True
except Exception as exc:
log.exception(exc)
raise
def _watcher(self, event):
self._get_children(event)
def _session_watcher(self, state):
if state in (KazooState.LOST, KazooState.SUSPENDED):
self._watch_established = False
elif (state == KazooState.CONNECTED and
not self._watch_established and not self._stopped):
self._client.handler.spawn(self._get_children)
class PatientChildrenWatch(object):
"""Patient Children Watch that returns values after the children
of a node don't change for a period of time
A separate watcher for the children of a node, that ignores
changes within a boundary time and sets the result only when the
boundary time has elapsed with no children changes.
Example::
watcher = PatientChildrenWatch(client, '/some/path',
time_boundary=5)
async_object = watcher.start()
# Blocks until the children have not changed for time boundary
# (5 in this case) seconds, returns children list and an
# async_result that will be set if the children change in the
# future
children, child_async = async_object.get()
.. note::
This Watch is different from :class:`DataWatch` and
:class:`ChildrenWatch` as it only returns once, does not take
a function that is called, and provides an
:class:`~kazoo.interfaces.IAsyncResult` object that can be
checked to see if the children have changed later.
"""
def __init__(self, client, path, time_boundary=30):
self.client = client
self.path = path
self.children = []
self.time_boundary = time_boundary
self.children_changed = client.handler.event_object()
def start(self):
"""Begin the watching process asynchronously
:returns: An :class:`~kazoo.interfaces.IAsyncResult` instance
that will be set when no change has occurred to the
children for time boundary seconds.
"""
self.asy = asy = self.client.handler.async_result()
self.client.handler.spawn(self._inner_start)
return asy
def _inner_start(self):
try:
while True:
async_result = self.client.handler.async_result()
self.children = self.client.retry(
self.client.get_children, self.path,
partial(self._children_watcher, async_result))
self.client.handler.sleep_func(self.time_boundary)
if self.children_changed.is_set():
self.children_changed.clear()
else:
break
self.asy.set((self.children, async_result))
except Exception as exc:
self.asy.set_exception(exc)
    def _children_watcher(self, async_result, event):
        self.children_changed.set()
        async_result.set(time.time())
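# --- Hedged usage sketch (illustration only, not part of the kazoo API) ------
# Shows DataWatch used directly rather than via the client.DataWatch
# decorator from the class docstring; assumes a ZooKeeper server is
# reachable at 127.0.0.1:2181.
if __name__ == '__main__':
    from kazoo.client import KazooClient
    def _on_change(data, stat):
        # stat is None while the node does not exist
        print("data=%r version=%s" % (data, stat.version if stat else None))
    _client = KazooClient(hosts='127.0.0.1:2181')
    _client.start()
    DataWatch(_client, '/path/to/watch', func=_on_change)
    _client.handler.sleep_func(5)  # allow a few change events to arrive
    _client.stop()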
| {
"content_hash": "917583a43063770f38cbd4f4d90d0999",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 77,
"avg_line_length": 34.55369928400955,
"alnum_prop": 0.5837132200580191,
"repo_name": "pombredanne/kazoo",
"id": "ad585da15d5f66121f3c1b86819c930fe72a0679",
"size": "14478",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "kazoo/recipe/watchers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1344"
},
{
"name": "Python",
"bytes": "492706"
},
{
"name": "Shell",
"bytes": "798"
}
],
"symlink_target": ""
} |
"""Runs SpecCPU2006.
From SpecCPU2006's documentation:
The SPEC CPU2006 benchmark is SPEC's industry-standardized, CPU-intensive
benchmark suite, stressing a system's processor, memory subsystem and compiler.
SpecCPU2006 homepage: http://www.spec.org/cpu2006/
"""
import logging
import posixpath
import re
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
FLAGS = flags.FLAGS
flags.DEFINE_enum('benchmark_subset', 'int', ['int', 'fp', 'all'],
'specify a subset of benchmarks to run: int, fp, all')
flags.DEFINE_string('runspec_config', 'linux64-x64-gcc47.cfg',
'name of the cpu2006 configuration to use (runspec --config'
' argument)')
flags.DEFINE_integer('runspec_iterations', 3,
'number of benchmark iterations to execute - default 3 '
'(runspec --iterations argument)')
flags.DEFINE_string('runspec_define', '',
'optional comma separated list of preprocessor macros: '
'SYMBOL[=VALUE] - e.g. numa,smt,sse=SSE4.2 (runspec '
'--define arguments)')
flags.DEFINE_boolean('runspec_enable_32bit', default=False,
help='setting this flag will result in installation of '
'multilib packages to enable use of 32-bit cpu2006 '
'binaries (useful when running on memory constrained '
'instance types where 64-bit execution may be problematic '
' - i.e. < 1.5-2GB/core)')
BENCHMARK_INFO = {'name': 'speccpu2006',
'description': 'Run Spec CPU2006',
'scratch_disk': True,
'num_machines': 1}
SPECCPU2006_TAR = 'cpu2006v1.2.tgz'
SPECCPU2006_DIR = 'cpu2006'
def GetInfo():
return BENCHMARK_INFO
def CheckPrerequisites():
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
data.ResourcePath(SPECCPU2006_TAR)
def Prepare(benchmark_spec):
"""Install SpecCPU2006 on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
vm = vms[0]
logging.info('prepare SpecCPU2006 on %s', vm)
vm.Install('wget')
vm.Install('build_tools')
vm.Install('fortran')
if (FLAGS.runspec_enable_32bit):
vm.Install('multilib')
vm.Install('numactl')
try:
local_tar_file_path = data.ResourcePath(SPECCPU2006_TAR)
except data.ResourceNotFound as e:
logging.error('Please provide %s under perfkitbenchmarker/data directory '
'before running SpecCPU2006 benchmark.', SPECCPU2006_TAR)
raise errors.Benchmarks.PrepareException(str(e))
vm.tar_file_path = posixpath.join(vm.GetScratchDir(), SPECCPU2006_TAR)
vm.spec_dir = posixpath.join(vm.GetScratchDir(), SPECCPU2006_DIR)
vm.RemoteCommand('chmod 777 %s' % vm.GetScratchDir())
vm.PushFile(local_tar_file_path, vm.GetScratchDir())
vm.RemoteCommand('cd %s && tar xvfz %s' % (vm.GetScratchDir(),
SPECCPU2006_TAR))
def ExtractScore(stdout, vm):
"""Exact the Spec (int|fp) score from stdout.
Args:
stdout: stdout from running RemoteCommand.
vm: The vm instance where Spec CPU2006 was run.
Sample input for SPECint:
...
...
=============================================
400.perlbench 9770 417 23.4 *
401.bzip2 9650 565 17.1 *
403.gcc 8050 364 22.1 *
429.mcf 9120 364 25.1 *
445.gobmk 10490 499 21.0 *
456.hmmer 9330 491 19.0 *
458.sjeng 12100 588 20.6 *
462.libquantum 20720 468 44.2 *
464.h264ref 22130 700 31.6 *
471.omnetpp 6250 349 17.9 *
473.astar 7020 482 14.6 *
483.xalancbmk 6900 248 27.8 *
Est. SPECint(R)_base2006 22.7
Sample input for SPECfp:
...
...
=============================================
410.bwaves 13590 717 19.0 *
416.gamess 19580 923 21.2 *
433.milc 9180 480 19.1 *
434.zeusmp 9100 600 15.2 *
435.gromacs 7140 605 11.8 *
436.cactusADM 11950 1289 9.27 *
437.leslie3d 9400 859 10.9 *
444.namd 8020 504 15.9 *
447.dealII 11440 409 28.0 *
450.soplex 8340 272 30.6 *
453.povray 5320 231 23.0 *
454.calculix 8250 993 8.31 *
459.GemsFDTD 10610 775 13.7 *
465.tonto 9840 565 17.4 *
470.lbm 13740 365 37.7 *
481.wrf 11170 788 14.2 *
482.sphinx3 19490 668 29.2 *
Est. SPECfp(R)_base2006 17.5
Returns:
A list of sample.Sample objects.
"""
results = []
re_begin_section = re.compile('^={1,}')
re_end_section = re.compile(r'Est. (SPEC.*_base2006)\s*(\S*)')
result_section = []
in_result_section = False
# Extract the summary section
for line in stdout.splitlines():
if in_result_section:
result_section.append(line)
# search for begin of result section
match = re.search(re_begin_section, line)
if match:
assert not in_result_section
in_result_section = True
continue
# search for end of result section
match = re.search(re_end_section, line)
if match:
assert in_result_section
spec_name = str(match.group(1))
spec_score = float(match.group(2))
in_result_section = False
# remove the final SPEC(int|fp) score, which has only 2 columns.
result_section.pop()
metadata = {'machine_type': vm.machine_type, 'num_cpus': vm.num_cpus}
results.append(sample.Sample(spec_name, spec_score, '', metadata))
for benchmark in result_section:
# ignore failed runs
if re.search('NR', benchmark):
continue
# name, ref_time, time, score, misc
name, _, _, score, _ = benchmark.split()
results.append(sample.Sample(str(name), float(score), '', metadata))
return results
def ParseOutput(vm):
"""Parses the output from Spec CPU2006.
Args:
vm: The vm instance where Spec CPU2006 was run.
Returns:
A list of samples to be published (in the same format as Run() returns).
"""
results = []
log_files = []
# FIXME(liquncheng): Only reference runs generate SPEC scores. The log
# id is hardcoded as 001, which might change with different runspec
# parameters. Spec CPU 2006 will generate different logs for build, test
# run, training run and ref run.
if FLAGS.benchmark_subset in ('int', 'all'):
log_files.append('CINT2006.001.ref.txt')
if FLAGS.benchmark_subset in ('fp', 'all'):
log_files.append('CFP2006.001.ref.txt')
for log in log_files:
stdout, _ = vm.RemoteCommand('cat %s/result/%s' % (vm.spec_dir, log),
should_log=True)
results.extend(ExtractScore(stdout, vm))
return results
def Run(benchmark_spec):
"""Run SpecCPU2006 on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
vms = benchmark_spec.vms
vm = vms[0]
logging.info('SpecCPU2006 running on %s', vm)
num_cpus = vm.num_cpus
iterations = ' --iterations=' + repr(FLAGS.runspec_iterations) if \
FLAGS.runspec_iterations != 3 else ''
defines = ' --define ' + ' --define '.join(FLAGS.runspec_define.split(','))\
if FLAGS.runspec_define != '' else ''
vm.RemoteCommand('cd %s; . ./shrc; ./bin/relocate; . ./shrc; rm -rf result; '
'runspec --config=%s --tune=base '
'--size=ref --noreportable --rate %s%s%s %s'
% (vm.spec_dir, FLAGS.runspec_config, num_cpus, iterations,
defines, FLAGS.benchmark_subset))
logging.info('SpecCPU2006 Results:')
return ParseOutput(vm)
def Cleanup(benchmark_spec):
"""Cleanup SpecCPU2006 on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
vm = vms[0]
vm.RemoteCommand('rm -rf %s' % vm.spec_dir)
vm.RemoteCommand('rm -f %s' % vm.tar_file_path)
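# --- Hedged sketch (illustration only): exercises ExtractScore with a trimmed,
# fabricated result block and a stand-in vm object that exposes only the two
# attributes the parser reads. The values are arbitrary examples.
if __name__ == '__main__':
  class _FakeVM(object):
    machine_type = 'example-machine-type'
    num_cpus = 4
  _fake_stdout = '\n'.join([
      '=============================================',
      '400.perlbench 9770 417 23.4 *',
      'Est. SPECint(R)_base2006 23.4',
  ])
  for _s in ExtractScore(_fake_stdout, _FakeVM()):
    print(_s)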
| {
"content_hash": "30f76b6ba1231ae6d612af1346e34771",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 80,
"avg_line_length": 34.69291338582677,
"alnum_prop": 0.5873808443032229,
"repo_name": "emaeliena/PerfKitBenchmarker",
"id": "30066053a4fae4c3fac3d0019a5e7e7ef223997e",
"size": "9407",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "perfkitbenchmarker/benchmarks/speccpu2006_benchmark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "681695"
},
{
"name": "Shell",
"bytes": "16445"
}
],
"symlink_target": ""
} |
'''
Test interacting with the wheel system. This script is useful when testing
wheel modules
'''
# Import Python libs
from __future__ import absolute_import
import optparse
import pprint
# Import Salt Libs
import salt.config
import salt.wheel
import salt.auth
def parse():
'''
Parse the command line options
'''
parser = optparse.OptionParser()
parser.add_option('-f',
'--fun',
'--function',
dest='fun',
help='The wheel function to execute')
parser.add_option('-a',
'--auth',
dest='eauth',
help='The external authentication mechanism to use')
options, args = parser.parse_args()
cli = options.__dict__
for arg in args:
if '=' in arg:
comps = arg.split('=')
cli[comps[0]] = comps[1]
return cli
class Wheeler(object):
'''
Set up communication with the wheel interface
'''
def __init__(self, cli):
self.opts = salt.config.master_config('/etc/salt')
self.opts.update(cli)
self.__eauth()
self.wheel = salt.wheel.Wheel(self.opts)
def __eauth(self):
'''
Fill in the blanks for the eauth system
'''
if self.opts['eauth']:
resolver = salt.auth.Resolver(self.opts)
res = resolver.cli(self.opts['eauth'])
self.opts.update(res)
def run(self):
'''
Execute the wheel call
'''
return self.wheel.master_call(**self.opts)
if __name__ == '__main__':
wheeler = Wheeler(parse())
pprint.pprint(wheeler.run())
| {
"content_hash": "1ba3fc2e3e0152189d85097c7a32037a",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 74,
"avg_line_length": 22.774647887323944,
"alnum_prop": 0.562152133580705,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "1bdec2e328d7dccf06aa28f45b3602eb9ab32a67",
"size": "1664",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.2/tests/wheeltest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 30, transform = "Difference", sigma = 0.0, exog_count = 0, ar_order = 0); | {
"content_hash": "6746a0b96d75d2d3acb4496169c650a9",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 168,
"avg_line_length": 38.285714285714285,
"alnum_prop": 0.7089552238805971,
"repo_name": "antoinecarme/pyaf",
"id": "0c14f355c972d63312125314b3e93ac483ad27fc",
"size": "268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Difference/trend_ConstantTrend/cycle_30/ar_/test_artificial_32_Difference_ConstantTrend_30__0.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
from django.db.models.signals import post_save
from django.dispatch import receiver
from users.models import User
from communications.models import SMS
@receiver(post_save, sender=User)
def post_save_user(sender, instance, created, **kwargs):
if created:
# instance.send_signup_greeting_sms()
SMS.objects.create(
receiver=instance.phonenumber,
content="{username}님, 회원가입을 축하드립니다.".format(
username=instance.username,
)
)
| {
"content_hash": "b8811efa2c1ea3cb5d4eac81628a91c0",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 56,
"avg_line_length": 28.055555555555557,
"alnum_prop": 0.6633663366336634,
"repo_name": "dobestan/fastblog",
"id": "99010ae06233bb5b42e6b7587e34bd44d2e117a1",
"size": "529",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "fastblog/users/signals/post_save.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from typing import Optional
from appium.options.common.supports_capabilities import SupportsCapabilities
DISABLE_SUPPRESS_ACCESSIBILITY_SERVICE = 'disableSuppressAccessibilityService'
class DisableSuppressAccessibilityServiceOption(SupportsCapabilities):
@property
def disable_suppress_accessibility_service(self) -> Optional[bool]:
"""
        Whether the automatic suppression of accessibility services is disabled.
"""
return self.get_capability(DISABLE_SUPPRESS_ACCESSIBILITY_SERVICE)
@disable_suppress_accessibility_service.setter
def disable_suppress_accessibility_service(self, value: bool) -> None:
"""
Being set to true tells the instrumentation process to not suppress
accessibility services during the automated test. This might be useful
if your automated test needs these services. false by default.
"""
self.set_capability(DISABLE_SUPPRESS_ACCESSIBILITY_SERVICE, value)
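# --- Hedged usage sketch (illustration only) ---------------------------------
# This option mixin is normally consumed through an options aggregate; the
# import below assumes the UiAutomator2Options class shipped with recent
# appium-python-client releases includes it.
if __name__ == '__main__':
    from appium.options.android import UiAutomator2Options
    options = UiAutomator2Options()
    options.disable_suppress_accessibility_service = True
    print(options.disable_suppress_accessibility_service)  # -> True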
| {
"content_hash": "8cd145feeecec90e0c770250edc37ca2",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 78,
"avg_line_length": 41.21739130434783,
"alnum_prop": 0.7468354430379747,
"repo_name": "appium/python-client",
"id": "980da567c8990dafc76a34704e4aa0a31551fe8c",
"size": "1736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appium/options/android/common/other/disable_suppress_accessibility_service_option.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "835"
},
{
"name": "Python",
"bytes": "801497"
},
{
"name": "Shell",
"bytes": "3195"
}
],
"symlink_target": ""
} |
USAGE="""
XMT
usage:
xmt init [-v...] [--parse=OPTS] [--transfer=OPTS] [--generate=OPTS]
[--rephrase=OPTS] [--full] [--reverse] [--ace-bin=PATH]
DIR [ITEM...]
xmt parse [-v...] [ITEM...]
xmt transfer [-v...] [ITEM...]
xmt generate [-v...] [ITEM...]
xmt rephrase [-v...] [ITEM...]
xmt evaluate [--coverage] [--bleu] [--oracle-bleu] [--all] [--ignore=S]
[--summary-only] [-v...] [ITEM...]
xmt select [--oracle-bleu] [--tokenize] [--rephrasing] [--item-id]
[-v...] [ITEM...]
xmt [--help|--version]
Tasks:
init create/modify workspace and import profiles
parse analyze input strings with source grammar
transfer transfer source to target semantics
generate realize strings from target semantics
rephrase realize strings from source semantics
evaluate evaluate results of other tasks
select print translation/realization pairs
Arguments:
DIR workspace directory
ITEM profile to process
Options:
-h, --help print usage and exit
-V, --version print version and exit
-v, --verbose increase logging verbosity (may be repeated)
--parse OPTS configure parsing with OPTS
--transfer OPTS configure transfer with OPTS
--generate OPTS configure generation with OPTS
--rephrase OPTS configure rephrasing with OPTS
--full import full profiles, not just item info
--reverse switch input and translation sentences
--ace-bin PATH path to ace binary [default=ace]
Evaluation Options:
--coverage
--bleu
--oracle-bleu
--all
--ignore S
--summary-only
"""
OPTS_USAGE="""
Usage: task -g PATH [-n N] [-y] [--timeout S]
[--max-chart-megabytes=M] [--max-unpack-megabytes=M]
[--only-subsuming]
Options:
-g PATH path to a grammar image
-n N only record the top N results [default=5]
-y use yy mode on input
--timeout S allow S seconds per item [default=60]
--max-chart-megabytes M max RAM for parse chart in MB [default=1200]
--max-unpack-megabytes M max RAM for unpacking in MB [default=1500]
--only-subsuming realization MRS must subsume input MRS
"""
import os
import re
import shlex
from glob import glob
import json
import logging
from configparser import ConfigParser
from docopt import docopt
from delphin import itsdb
from xmt import task, select, evaluate, util
__version__ = '0.2.0'
default_config = {
'DEFAULT': {
'ace-bin': 'ace',
'num-results': 5,
'timeout': 60,
'result-buffer-size': 1000,
'max-chart-megabytes': 1200,
'max-unpack-megabytes': 1500,
'only-subsuming': 'no',
'yy-mode': 'no',
},
'parse': {},
'transfer': {},
'generate': {},
'rephrase': {},
}
relations_string = '''
item:
i-id :integer :key # item id
i-input :string # input string
i-length :integer # number of tokens in input
i-translation :string # reference translation
p-info:
i-id :integer :key # item parsed
time :integer # processing time (msec)
memory :integer # bytes of memory allocated
p-result:
i-id :integer :key # item parsed
p-id :integer :key # parse result id
derivation :string # derivation tree for this reading
mrs :string # mrs for this reading
score :float # parse reranker score
x-info:
i-id :integer :key # item parsed
p-id :integer :key # parse result id
time :integer # processing time (msec)
memory :integer # bytes of memory allocated
x-result:
i-id :integer :key # item parsed
p-id :integer :key # parse result id
x-id :integer :key # transfer result id
mrs :string # transferred mrs
score :float # transfer reranker score
g-info:
i-id :integer :key # item parsed
p-id :integer :key # parse result id
x-id :integer :key # transfer result id
time :integer # processing time (msec)
memory :integer # bytes of memory allocated
g-result:
i-id :integer :key # item parsed
p-id :integer :key # parse result id
x-id :integer :key # transfer result id
g-id :integer :key # generation result id
surface :string # realization string
mrs :string # specified mrs used in realization
score :float # realization reranker score
r-info:
i-id :integer :key # item parsed
p-id :integer :key # parse result id
time :integer # processing time (msec)
memory :integer # bytes of memory allocated
r-result:
i-id :integer :key # item parsed
p-id :integer :key # parse result id
r-id :integer :key # rephrase result id
surface :string # realization string
mrs :string # specified mrs used in realization
score :float # parse reranker score
'''
def main():
args = docopt(
USAGE,
version='Xmt {}'.format(__version__),
# options_first=True
)
logging.basicConfig(level=50 - ((args['--verbose'] + 2) * 10))
args['ITEM'] = [i for pattern in args['ITEM'] for i in glob(pattern)]
if args['init']:
init(args)
elif args['parse']:
task.do('parse', args)
elif args['transfer']:
task.do('transfer', args)
elif args['generate']:
task.do('generate', args)
elif args['rephrase']:
task.do('rephrase', args)
elif args['evaluate']:
evaluate.do(args)
elif args['select']:
select.do(args)
def init(args):
d = args['DIR']
prepare_workspace_dir(d)
config = ConfigParser()
config.read(os.path.join(d, 'default.conf'))
config['DEFAULT'] = dict(default_config['DEFAULT'])
util._update_config(config['DEFAULT'], args, None)
for task in ('parse', 'transfer', 'generate', 'rephrase'):
config.setdefault(task, default_config.get(task, {}))
if args['--' + task]:
argv = shlex.split(args['--' + task])
taskargs = docopt(OPTS_USAGE, argv=argv)
util._update_config(config[task], taskargs, task)
# default rephrase grammar to parse grammar
if 'grammar' not in config['rephrase'] and 'grammar' in config['parse']:
config['rephrase']['grammar'] = config['parse']['grammar']
for item in args['ITEM']:
item = os.path.normpath(item)
rows = item_rows(item, args['--reverse'])
itemdir = _unique_pathname(d, os.path.basename(item))
os.makedirs(itemdir)
with open(os.path.join(itemdir, 'relations'), 'w') as fh:
print(relations_string, file=fh)
p = itsdb.ItsdbProfile(itemdir)
p.write_table('item', rows, gzip=True)
if args['--full']:
info, results = _parse_tables(item)
p.write_table('p-info', info, gzip=True)
p.write_table('p-result', results, gzip=True)
with open(os.path.join(d, 'default.conf'), 'w') as fh:
config.write(fh)
def item_rows(item, reverse=False):
data = []
if os.path.isdir(item):
p = itsdb.ItsdbProfile(item)
output_fn = os.path.join(p.root, 'output')
if ((os.path.isfile(output_fn) or os.path.isfile(output_fn + '.gz'))
and len(list(p.read_table('output'))) > 0):
for row in p.join('item', 'output'):
data.append((
row['item:i-id'],
row['item:i-input'],
row['output:o-surface']
))
else:
data.extend(p.select('item', ['i-id', 'i-input', 'i-translation']))
elif os.path.isfile(item):
for i, line in enumerate(open(item)):
src, tgt = line.split('\t', 1)
data.append(((i+1)*10, src.rstrip(), tgt.rstrip()))
else:
raise ValueError('Invalid item: ' + str(item))
rows = []
for i_id, src, tgt in data:
if reverse:
src, tgt = tgt, src
rows.append({
'i-id': i_id,
'i-input': src,
'i-length': len(src.split()),
'i-translation': tgt
})
return rows
def _parse_tables(item):
info, results = [], []
makeinfo = lambda a, b, c: {
'i-id': a, 'time': b, 'memory': c
}
makeresult = lambda a, b, c, d, e: {
'i-id': a, 'p-id': b, 'derivation': c, 'mrs': d, 'score': e
}
if os.path.isdir(item):
p = itsdb.ItsdbProfile(item)
fn = os.path.join(p.root, 'parse')
if os.path.isfile(fn) or os.path.isfile(fn + '.gz'):
for row in p.read_table('parse'):
info.append(makeinfo(row['i-id'], row['total'], row['others']))
fn = os.path.join(p.root, 'result')
if os.path.isfile(fn) or os.path.isfile(fn + '.gz'):
for row in p.join('parse', 'result'):
results.append(makeresult(
row['parse:i-id'], row['result:result-id'],
row['result:derivation'], row['result:mrs'],
'1.0' # for now
))
else:
raise ValueError('Only profiles allowed with --full: ' + str(item))
return info, results
def _unique_pathname(d, bn):
fn = os.path.join(d, bn)
i = 0
while os.path.exists(fn):
i += 1
fn = os.path.join(d, bn + '.' + str(i))
return fn
def validate(args):
    p_opts = docopt(OPTS_USAGE, argv=args['--parse'] or '')
    t_opts = docopt(OPTS_USAGE, argv=args['--transfer'] or '')
    g_opts = docopt(OPTS_USAGE, argv=args['--generate'] or '')
    r_opts = docopt(OPTS_USAGE, argv=args['--rephrase'] or '')
def prepare_workspace_dir(d):
if not os.path.isdir(d):
os.makedirs(d)
if __name__ == '__main__':
main()
| {
"content_hash": "d01a348379c5aad665d71fe97eb4d56d",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 79,
"avg_line_length": 34.13564668769716,
"alnum_prop": 0.5228721929581369,
"repo_name": "goodmami/xmt",
"id": "540a40f081e04054ee864634251611dec79a8da5",
"size": "10845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xmt/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "94483"
},
{
"name": "Shell",
"bytes": "2977"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(verbose_name='ID',
serialize=False, auto_created=True, primary_key=True)),
('phone_number', models.IntegerField()),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
| {
"content_hash": "4f26d3a4a9b0b7f3617a87381f3b323a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 95,
"avg_line_length": 29.304347826086957,
"alnum_prop": 0.5712166172106825,
"repo_name": "frankmaina/django2fa",
"id": "896684851495bbf9cc08d47d5b00545b1531ec85",
"size": "698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "accounts/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "114007"
},
{
"name": "HTML",
"bytes": "7058"
},
{
"name": "JavaScript",
"bytes": "241003"
},
{
"name": "Python",
"bytes": "21015"
}
],
"symlink_target": ""
} |
ZOGGERY_PROTO_SPEC = """
protocol org.xlattice.zoggery
message logEntry:
timestamp fuint32
nodeID fbytes20
key fbytes20
length vuint32
by lstring
path lstring
"""
| {
"content_hash": "6a6623dc3c7585caca55f9d6a3e38591",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 29,
"avg_line_length": 18.181818181818183,
"alnum_prop": 0.655,
"repo_name": "jddixon/fieldz",
"id": "75d65edc4bca4f983abf01cbd8d1e5986c9eb776",
"size": "239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/zoggery_proto_spec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6648"
},
{
"name": "Python",
"bytes": "249542"
},
{
"name": "Shell",
"bytes": "1949"
}
],
"symlink_target": ""
} |
"""distutils.command.bdist_wininst
Implements the Distutils 'bdist_wininst' command: create a windows installer
exe-program."""
# This module should be kept compatible with Python 1.5.2.
__revision__ = "$Id: bdist_wininst.py,v 1.46 2003/06/12 17:23:58 theller Exp $"
import sys, os, string
from distutils.core import Command
from distutils.util import get_platform
from distutils.dir_util import create_tree, remove_tree
from distutils.errors import *
from distutils.sysconfig import get_python_version
from distutils import log
class bdist_wininst (Command):
description = "create an executable installer for MS Windows"
user_options = [('bdist-dir=', None,
"temporary directory for creating the distribution"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('target-version=', 'v',
"require a specific python version" +
" on the target system"),
('no-target-compile', 'c',
"do not compile .py to .pyc on the target system"),
('no-target-optimize', 'o',
"do not compile .py to .pyo (optimized)"
"on the target system"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('bitmap=', 'b',
"bitmap to use for the installer instead of python-powered logo"),
('title=', 't',
"title to display on the installer background instead of default"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('install-script=', None,
"basename of installation script to be run after"
"installation or before deinstallation"),
]
boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
'skip-build']
def initialize_options (self):
self.bdist_dir = None
self.keep_temp = 0
self.no_target_compile = 0
self.no_target_optimize = 0
self.target_version = None
self.dist_dir = None
self.bitmap = None
self.title = None
self.skip_build = 0
self.install_script = None
# initialize_options()
def finalize_options (self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'wininst')
if not self.target_version:
self.target_version = ""
if self.distribution.has_ext_modules():
short_version = get_python_version()
if self.target_version and self.target_version != short_version:
raise DistutilsOptionError, \
"target version can only be" + short_version
self.target_version = short_version
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
if self.install_script:
for script in self.distribution.scripts:
if self.install_script == os.path.basename(script):
break
else:
raise DistutilsOptionError, \
"install_script '%s' not found in scripts" % \
self.install_script
# finalize_options()
def run (self):
if (sys.platform != "win32" and
(self.distribution.has_ext_modules() or
self.distribution.has_c_libraries())):
raise DistutilsPlatformError \
("distribution contains extensions and/or C libraries; "
"must be compiled on a Windows 32 platform")
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install', reinit_subcommands=1)
install.root = self.bdist_dir
install.skip_build = self.skip_build
install.warn_dir = 0
install_lib = self.reinitialize_command('install_lib')
# we do not want to include pyc or pyo files
install_lib.compile = 0
install_lib.optimize = 0
# Use a custom scheme for the zip-file, because we have to decide
# at installation time which scheme to use.
for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'):
value = string.upper(key)
if key == 'headers':
value = value + '/Include/$dist_name'
setattr(install,
'install_' + key,
value)
log.info("installing to %s", self.bdist_dir)
install.ensure_finalized()
# avoid warning of 'install_lib' about installing
# into a directory not in sys.path
sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
install.run()
del sys.path[0]
# And make an archive relative to the root of the
# pseudo-installation tree.
from tempfile import mktemp
archive_basename = mktemp()
fullname = self.distribution.get_fullname()
arcname = self.make_archive(archive_basename, "zip",
root_dir=self.bdist_dir)
# create an exe containing the zip-file
self.create_exe(arcname, fullname, self.bitmap)
# remove the zip-file again
log.debug("removing temporary file '%s'", arcname)
os.remove(arcname)
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# run()
def get_inidata (self):
# Return data describing the installation.
lines = []
metadata = self.distribution.metadata
# Write the [metadata] section. Values are written with
# repr()[1:-1], so they do not contain unprintable characters, and
# are not surrounded by quote chars.
lines.append("[metadata]")
# 'info' will be displayed in the installer's dialog box,
# describing the items to be installed.
info = (metadata.long_description or '') + '\n'
for name in ["author", "author_email", "description", "maintainer",
"maintainer_email", "name", "url", "version"]:
data = getattr(metadata, name, "")
if data:
info = info + ("\n %s: %s" % \
(string.capitalize(name), data))
lines.append("%s=%s" % (name, repr(data)[1:-1]))
# The [setup] section contains entries controlling
# the installer runtime.
lines.append("\n[Setup]")
if self.install_script:
lines.append("install_script=%s" % self.install_script)
lines.append("info=%s" % repr(info)[1:-1])
lines.append("target_compile=%d" % (not self.no_target_compile))
lines.append("target_optimize=%d" % (not self.no_target_optimize))
if self.target_version:
lines.append("target_version=%s" % self.target_version)
title = self.title or self.distribution.get_fullname()
lines.append("title=%s" % repr(title)[1:-1])
import time
import distutils
build_info = "Build %s with distutils-%s" % \
(time.ctime(time.time()), distutils.__version__)
lines.append("build_info=%s" % build_info)
return string.join(lines, "\n")
# get_inidata()
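    # Illustrative sketch (added; not part of the original source): for a
    # hypothetical distribution "spam" 1.0, get_inidata() returns roughly:
    #
    #   [metadata]
    #   name=spam
    #   version=1.0
    #
    #   [Setup]
    #   info=...
    #   target_compile=1
    #   target_optimize=1
    #   title=spam-1.0
    #   build_info=Built <date> with distutils-<version>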
def create_exe (self, arcname, fullname, bitmap=None):
import struct
self.mkpath(self.dist_dir)
cfgdata = self.get_inidata()
if self.target_version:
# if we create an installer for a specific python version,
# it's better to include this in the name
installer_name = os.path.join(self.dist_dir,
"%s.win32-py%s.exe" %
(fullname, self.target_version))
else:
installer_name = os.path.join(self.dist_dir,
"%s.win32.exe" % fullname)
self.announce("creating %s" % installer_name)
if bitmap:
bitmapdata = open(bitmap, "rb").read()
bitmaplen = len(bitmapdata)
else:
bitmaplen = 0
file = open(installer_name, "wb")
file.write(self.get_exe_bytes())
if bitmap:
file.write(bitmapdata)
file.write(cfgdata)
header = struct.pack("<iii",
0x1234567A, # tag
len(cfgdata), # length
bitmaplen, # number of bytes in bitmap
)
file.write(header)
file.write(open(arcname, "rb").read())
# create_exe()
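    # Descriptive note (added): the installer produced above is laid out as
    # the wininst.exe stub + optional bitmap + cfgdata (ini text) + a 12-byte
    # struct header (tag 0x1234567A, len(cfgdata), bitmap length) + the zip
    # archive of the pseudo-installation tree.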
def get_exe_bytes (self):
# wininst.exe is in the same directory as this file
directory = os.path.dirname(__file__)
filename = os.path.join(directory, "wininst.exe")
return open(filename, "rb").read()
# class bdist_wininst
| {
"content_hash": "f9784804ad80544307bb315c9b3456a3",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 88,
"avg_line_length": 38.239669421487605,
"alnum_prop": 0.5499243570347958,
"repo_name": "OS2World/APP-INTERNET-torpak_2",
"id": "e2c48c534dd9252e21abbae17b19b618a9507752",
"size": "9254",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Lib/distutils/command/bdist_wininst.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from FileSystemLLAError import FileSystemLLAError
def copyACL(rc,output,outerr,parseParamList,Logger):
return
def makeMountpoint(rc,output,outerr,parseParamList,Logger):
return
def removeMountpoint(rc,output,outerr,parseParamList,Logger):
return
def listMountpoint(rc,output,outerr,parseParamList,Logger):
"""
Return target volume of a mount point
"""
mountpoint=""
return mountpoint
def getCellByPath(rc,output,outerr,parseParamList,Logger):
"""
Returns the cell to which a file or directory belongs
"""
cellname=""
return cellname
def setQuota(rc,output,outerr,parseParamList,Logger):
"""
Set a volume-quota by path
"""
return
def listQuota(rc,output,outerr,parseParamList,Logger):
"""
list a volume quota by path
"""
quota=-1
return quota
def returnVolumeByPath(rc,output,outerr,parseParamList,Logger):
"""
Basically a fs examine
"""
volume=""
return volume
| {
"content_hash": "be890733ddea18e9859873e414784342",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 63,
"avg_line_length": 22.75,
"alnum_prop": 0.6843156843156843,
"repo_name": "openafs-contrib/afspy",
"id": "aa6aa7e17f3e3be52b3c766393166c425a8163e7",
"size": "1001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "afs/util/FileSystemLLAParse.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "372676"
},
{
"name": "Shell",
"bytes": "98"
}
],
"symlink_target": ""
} |
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "Moneta-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created"
| {
"content_hash": "8f36e6ab4f349e23514ed2e9691036e9",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 69,
"avg_line_length": 27.166666666666668,
"alnum_prop": 0.7085889570552147,
"repo_name": "moneta-dev/moneta",
"id": "ff9a84b649d47015ecfdb8d40df1c287b56c6d2e",
"size": "893",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "share/qt/clean_mac_info_plist.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32090"
},
{
"name": "C++",
"bytes": "2614163"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Makefile",
"bytes": "9739"
},
{
"name": "Objective-C",
"bytes": "1052"
},
{
"name": "Objective-C++",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "69716"
},
{
"name": "Shell",
"bytes": "13173"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
import bootstrap
try:
from django.conf import settings
settings.configure()
except ImportError:
print 'Failed to import Django'
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ims-bootstrap'
copyright = u'2016, Dan Watson'
author = u'Dan Watson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = bootstrap.__version__
# The full version, including alpha/beta/rc tags.
release = bootstrap.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'ims-bootstrapdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ims-bootstrap.tex', u'ims-bootstrap Documentation',
u'Dan Watson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ims-bootstrap', u'ims-bootstrap Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ims-bootstrap', u'ims-bootstrap Documentation',
author, 'ims-bootstrap', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "e4e68488153a18e6c1941800ce0d54e7",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 79,
"avg_line_length": 33.1,
"alnum_prop": 0.6842900302114804,
"repo_name": "imsweb/django-bootstrap",
"id": "540c1460b636f97b31a66f8195c830bbc452706d",
"size": "9706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "48537"
},
{
"name": "HTML",
"bytes": "5574"
},
{
"name": "JavaScript",
"bytes": "236984"
},
{
"name": "Python",
"bytes": "19652"
}
],
"symlink_target": ""
} |
__author__ = 'Ariel Anthieni'
# Import libraries
import os
import json
import csv
import codecs
# Set up variables
dir_origen = '/opt/desarrollo/metadata-tools/convert-tools/data/in/'
dir_destino = '/opt/desarrollo/metadata-tools/convert-tools/data/out/'
geocampo = 'WKT'
tabla = 'calles_indec'
# List the files in the directory
ficheros = os.listdir(dir_origen)
"""
El script analiza el contenido del encabezado del csv y genera el array luego produciendo el codigo sql
para crear la tabla e insertar los registros
"""
for archivo in ficheros:
ext = os.path.splitext(archivo)
    # check whether it is a product
if (( ext[0] == '20161212calles_gba')):
#abro el csv
filecsv = open(dir_origen+archivo)
objcsv = csv.reader(filecsv)
        # Load the structure into an array
arreglo = []
geoarreglo = []
elementos_sql = {}
multigeo = {}
multiwkt = ''
for elemento in objcsv:
arreglo.append(elemento)
filecsv.close()
encabezado = arreglo[0]
encabezado_col = ''
        # Create the output file
resultado = codecs.open(dir_destino+ext[0]+'.sql', 'w','utf-8')
#jsongeo = json.dumps(georesultado, ensure_ascii=False).encode('utf8')
#resultado.write(jsongeo.decode('utf-8'))
        # create the table needed for the import
createsql = 'CREATE TABLE '+tabla+' ('
for col in encabezado:
if col == geocampo:
createsql = createsql + col + ' ' + 'geometry , '
else:
createsql = createsql + col + ' ' + 'character varying(255) , '
encabezado_col = encabezado_col + col + ', '
createsql = createsql[:-2]
encabezado_col = encabezado_col[:-2]
createsql = createsql + ');\n'
        # Write to the file
resultado.write(createsql)
idgeo = encabezado.index(geocampo)
i = 0
for elemento in arreglo:
            # Walk through the header
if i == 0 :
i=i+1
else:
j = 0
elementos_sql = []
for col in encabezado:
elementos_sql.append(elemento[j])
j=j+1
                # Build the insert statement
insertsql = 'INSERT INTO '+tabla + ' (' + encabezado_col + ') VALUES ('
for columna in elementos_sql:
insertsql = insertsql +"$$" + columna + "$$" + ', '
insertsql = insertsql[:-2]
insertsql = insertsql + ');\n'
                # Write to the file
resultado.write(insertsql)
resultado.close()
| {
"content_hash": "15375c177d2786a2aca54c286bff3009",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 103,
"avg_line_length": 23.775862068965516,
"alnum_prop": 0.5532994923857868,
"repo_name": "elcoloo/metadata-tools",
"id": "08627cea7e27d5071ec070675023a0b58c37e61f",
"size": "2758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "convert-tools/csv_gba_to_sql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22284"
}
],
"symlink_target": ""
} |
from datetime import datetime
from qiita_db.metadata_template.constants import (
SAMPLE_TEMPLATE_COLUMNS, PREP_TEMPLATE_COLUMNS,
PREP_TEMPLATE_COLUMNS_TARGET_GENE)
from qiita_db.metadata_template.prep_template import PrepTemplate
from qiita_db.metadata_template.sample_template import SampleTemplate
from qiita_db.sql_connection import TRN
# getting columns in each info file that we need to check for
cols_sample = [col
for key, vals in SAMPLE_TEMPLATE_COLUMNS.items()
for col, dt in vals.columns.items() if dt == datetime]
# note: list.extend() returns None, so the two column lists are joined with
# '+' to keep the combined list
cols_prep = [col
             for key, vals in PREP_TEMPLATE_COLUMNS.items()
             for col, dt in vals.columns.items() if dt == datetime] + [
    col
    for key, vals in PREP_TEMPLATE_COLUMNS_TARGET_GENE.items()
    for col, dt in vals.columns.items()]
def transform_date(value):
# for the way the patches are applied we need to have this import and
# the next 2 variables within this function
from datetime import datetime
# old format : new format
formats = {
# 4 digits year
'%m/%d/%Y %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%m-%d-%Y %H:%M': '%Y-%m-%d %H:%M',
'%m/%d/%Y %H': '%Y-%m-%d %H',
'%m-%d-%Y': '%Y-%m-%d',
'%m-%Y': '%Y-%m',
'%Y': '%Y',
# 2 digits year
'%m/%d/%y %H:%M:%S': '%Y-%m-%d %H:%M:%S',
'%m-%d-%y %H:%M': '%Y-%m-%d %H:%M',
'%m/%d/%y %H': '%Y-%m-%d %H',
'%m-%d-%y': '%Y-%m-%d',
'%m-%y': '%Y-%m',
'%y': '%Y'
}
# loop over the old formats to see which one is it
if value is not None:
date = None
for i, fmt in enumerate(formats):
try:
date = datetime.strptime(value, fmt)
break
except ValueError:
pass
if date is not None:
value = date.strftime(formats[fmt])
return value
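# Illustrative examples of the conversion above (added; dates are made up):
#   transform_date('06/30/2015 10:05:00')  ->  '2015-06-30 10:05:00'
#   transform_date('06-2015')              ->  '2015-06'
# Values that match none of the old formats (and None) are returned unchanged.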
if cols_sample:
with TRN:
        # a few notes: just getting the sample info files that have the
        # datetime columns; ignoring the tables 'prep_template' and
        # 'prep_template_sample'
sql = """SELECT table_name, array_agg(column_name::text)
FROM information_schema.columns
WHERE column_name IN %s
AND table_name LIKE 'sample_%%'
AND table_name NOT IN (
'prep_template', 'prep_template_sample')
GROUP BY table_name"""
# note that we are looking for those columns with duplicated names in
# the headers
TRN.add(sql, [tuple(set(cols_sample))])
for table, columns in dict(TRN.execute_fetchindex()).items():
# [1] the format is table_# so taking the #
st = SampleTemplate(int(table.split('_')[1]))
# getting just the columns of interest
st_df = st.to_dataframe()[columns]
# converting to datetime
for col in columns:
st_df[col] = st_df[col].apply(transform_date)
st.update(st_df)
if cols_prep:
with TRN:
        # a few notes: just getting the prep info files that have the
        # datetime columns; ignoring the tables 'prep_template' and
        # 'prep_template_sample'
sql = """SELECT table_name, array_agg(column_name::text)
FROM information_schema.columns
WHERE column_name IN %s
AND table_name LIKE 'prep_%%'
AND table_name NOT IN (
'prep_template', 'prep_template_sample')
GROUP BY table_name"""
# note that we are looking for those columns with duplicated names in
# the headers
TRN.add(sql, [tuple(set(cols_prep))])
for table, columns in dict(TRN.execute_fetchindex()).items():
# [1] the format is table_# so taking the #
pt = PrepTemplate(int(table.split('_')[1]))
# getting just the columns of interest
pt_df = pt.to_dataframe()[columns]
# converting to datetime
for col in columns:
pt_df[col] = pt_df[col].apply(transform_date)
pt.update(pt_df)
| {
"content_hash": "0c6aa96c02c88e4ffe8f511b41d56e0c",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 78,
"avg_line_length": 39.61467889908257,
"alnum_prop": 0.539138490041686,
"repo_name": "antgonza/qiita",
"id": "62c86e02e83278bd30f0b7a8949e1fc678ba39cd",
"size": "4669",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "qiita_db/support_files/patches/python_patches/51.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2334"
},
{
"name": "HTML",
"bytes": "983129"
},
{
"name": "JavaScript",
"bytes": "95318"
},
{
"name": "Makefile",
"bytes": "6838"
},
{
"name": "PLpgSQL",
"bytes": "87575"
},
{
"name": "Python",
"bytes": "2615829"
},
{
"name": "Shell",
"bytes": "3016"
}
],
"symlink_target": ""
} |
import sys
import os
from time import sleep
from threading import Timer
import rospy
import baxter_interface
from baxter_interface import CHECK_VERSION
class JointRecorder(object):
def __init__(self, filename, rate):
"""
Records joint data to a file at a specified rate.
"""
self._filename = filename
self._raw_rate = rate
self._rate = rospy.Rate(rate)
self._start_time = rospy.get_time()
self._done = False
self._limb_left = baxter_interface.Limb("left")
self._limb_right = baxter_interface.Limb("right")
self._gripper_left = baxter_interface.Gripper("left", CHECK_VERSION)
self._gripper_right = baxter_interface.Gripper("right", CHECK_VERSION)
self._io_left_lower = baxter_interface.DigitalIO('left_lower_button')
self._io_left_upper = baxter_interface.DigitalIO('left_upper_button')
self._io_right_lower = baxter_interface.DigitalIO('right_lower_button')
self._io_right_upper = baxter_interface.DigitalIO('right_upper_button')
# Verify Grippers Have No Errors and are Calibrated
if self._gripper_left.error():
self._gripper_left.reset()
if self._gripper_right.error():
self._gripper_right.reset()
if (not self._gripper_left.calibrated() and
self._gripper_left.type() != 'custom'):
self._gripper_left.calibrate()
if (not self._gripper_right.calibrated() and
self._gripper_right.type() != 'custom'):
self._gripper_right.calibrate()
def _time_stamp(self):
return rospy.get_time() - self._start_time
def stop(self):
"""
Stop recording.
"""
print "!!!! Closing Record File !!!!"
#self.file.close()
self._done = True
def done(self):
"""
Return whether or not recording is done.
"""
if rospy.is_shutdown():
self.stop()
return self._done
def record(self):
"""
        Records the current joint positions to a csv file. If a filename was
        provided at construction, this function will record the latest set of
        joint angles in csv format.
This function does not test to see if a file exists and will overwrite
existing files.
"""
if self._filename:
if os.path.exists(self._filename):
os.remove(self._filename)
self.joints_left = self._limb_left.joint_names()
self.joints_right = self._limb_right.joint_names()
self.file = open(self._filename, 'w')
self.file.write('time,')
self.file.write(','.join([j for j in self.joints_left]) + ',')
self.file.write('left_gripper,')
self.file.write(','.join([j for j in self.joints_right]) + ',')
self.file.write('right_gripper\n')
t = Timer(1.0/self._raw_rate, self.save)
t.start()
def save(self):
# Look for gripper button presses
if self._io_left_lower.state:
self._gripper_left.open()
elif self._io_left_upper.state:
self._gripper_left.close()
if self._io_right_lower.state:
self._gripper_right.open()
elif self._io_right_upper.state:
self._gripper_right.close()
angles_left = [self._limb_left.joint_angle(j)
for j in self.joints_left]
angles_right = [self._limb_right.joint_angle(j)
for j in self.joints_right]
self.file.write("%f," % (self._time_stamp(),))
self.file.write(','.join([str(x) for x in angles_left]) + ',')
self.file.write(str(self._gripper_left.position()) + ',')
self.file.write(','.join([str(x) for x in angles_right]) + ',')
self.file.write(str(self._gripper_right.position()) + '\n')
if not self.done():
t = Timer(1.0/self._raw_rate, self.save)
t.start()
else:
print "!!!! Closing Record File !!!!"
self.file.close()
#self._rate.sleep()
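# Usage sketch (added; assumes a running ROS master, the Baxter SDK, and a
# hypothetical node name):
#   rospy.init_node('joint_recorder')
#   recorder = JointRecorder('joint_states.csv', rate=100)
#   rospy.on_shutdown(recorder.stop)
#   recorder.record()
#   while not recorder.done():
#       rospy.sleep(1.0)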
| {
"content_hash": "54c18cf5a283dcb4b76998f0e2c4e726",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 79,
"avg_line_length": 35.08403361344538,
"alnum_prop": 0.5667065868263473,
"repo_name": "LCAS/zoidbot",
"id": "a1d82184003169677cf126a45ddfe05bb170265e",
"size": "4195",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zoidbot_tools/src/zoidbot_tools/recorder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "62091"
},
{
"name": "CMake",
"bytes": "9940"
},
{
"name": "Lua",
"bytes": "32429"
},
{
"name": "Python",
"bytes": "223630"
},
{
"name": "Shell",
"bytes": "1048"
}
],
"symlink_target": ""
} |
from unittest import mock
import uuid
def service_registered_successfully(registry, *services):
for service in services:
service_entry = (
service['host'], service['port'], service['node_id'], service['type'])
try:
entry = registry._repository._registered_services[
service['name']][service['version']]
assert service_entry in entry
except KeyError:
raise
return True
def no_pending_services(registry):
return len(registry._repository.get_pending_services()) == 0
def instance_returned_successfully(response, service):
instance = (
service['host'], service['port'], service['node_id'], service['type'])
for returned_instance in response['params']['instances']:
t = (
returned_instance['host'], returned_instance['port'], returned_instance['node'], returned_instance['type'])
if instance == t:
return True
return False
def subscriber_returned_successfully(response, service):
service_t = (service['host'], service['port'], service['node_id'], service['name'], service['version'])
for s in response['params']['subscribers']:
subscriber_t = (s['host'], s['port'], s['node_id'], s['name'], s['version'])
if service_t == subscriber_t:
return True
return False
def test_register_independent_service(registry, service_a1):
registry.register_service(
packet={'params': service_a1}, registry_protocol=mock.Mock())
assert service_registered_successfully(registry, service_a1)
assert no_pending_services(registry)
def test_register_dependent_service(registry, service_a1, service_b1):
registry.register_service(
packet={'params': service_b1}, registry_protocol=mock.Mock())
assert not no_pending_services(registry)
registry.register_service(
packet={'params': service_a1}, registry_protocol=mock.Mock())
assert no_pending_services(registry)
assert service_registered_successfully(registry, service_a1, service_b1)
def test_deregister_dependent_service(service_a1, service_b1, registry):
registry.register_service(
packet={'params': service_b1}, registry_protocol=mock.Mock())
registry.register_service(
packet={'params': service_a1}, registry_protocol=mock.Mock())
assert no_pending_services(registry)
registry.deregister_service(service_a1['host'], service_a1['port'], service_a1['node_id'])
assert not no_pending_services(registry)
def test_get_instances(service_a1, registry):
registry.register_service(
packet={'params': service_a1}, registry_protocol=mock.Mock())
protocol = mock.Mock()
registry.get_service_instances(
packet={'params': service_a1, 'request_id': str(uuid.uuid4())}, registry_protocol=protocol)
assert instance_returned_successfully(
protocol.send.call_args_list[0][0][0], service_a1)
def test_xsubscribe(service_a1, service_d1, registry):
# assert service_d1 == {}
registry.register_service(
packet={'params': service_a1}, registry_protocol=mock.Mock())
registry.register_service(
packet={'params': service_d1}, registry_protocol=mock.Mock())
registry._xsubscribe(packet={'params': service_d1})
protocol = mock.Mock()
params = {
'name': service_a1['name'],
'version': service_a1['version'],
'endpoint': service_d1['events'][0]['endpoint']
}
registry.get_subscribers(packet={'params': params, 'request_id': str(uuid.uuid4())}, protocol=protocol)
assert subscriber_returned_successfully(protocol.send.call_args_list[0][0][0], service_d1)
| {
"content_hash": "c6f6e242dc519191c88b39d5bb9434ea",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 119,
"avg_line_length": 35.095238095238095,
"alnum_prop": 0.6640434192672998,
"repo_name": "kashifrazzaqui/vyked",
"id": "80af64a08d15a406a932d00d250ef1e26a449137",
"size": "3685",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tests/test_registry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "110600"
},
{
"name": "Shell",
"bytes": "531"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from djcelery.models import TaskMeta
from .models import (Application, BenchmarkConfig, DBConf, DBMSCatalog,
DBMSMetrics, KnobCatalog, MetricCatalog, PipelineResult,
Project, Result, ResultData, Statistics, WorkloadCluster)
class DBMSCatalogAdmin(admin.ModelAdmin):
list_display = ['dbms_info']
def dbms_info(self, obj):
return obj.full_name
class KnobCatalogAdmin(admin.ModelAdmin):
list_display = ['name', 'dbms_info', 'tunable']
ordering = ['name', 'dbms__type', 'dbms__version']
list_filter = ['tunable']
def dbms_info(self, obj):
return obj.dbms.full_name
class MetricCatalogAdmin(admin.ModelAdmin):
list_display = ['name', 'dbms_info', 'metric_type']
ordering = ['name', 'dbms__type', 'dbms__version']
list_filter = ['metric_type']
def dbms_info(self, obj):
return obj.dbms.full_name
class ProjectAdmin(admin.ModelAdmin):
list_display = ('name', 'user', 'last_update', 'creation_time')
fields = ['name', 'user', 'last_update', 'creation_time']
class ApplicationAdmin(admin.ModelAdmin):
fields = ['name', 'user', 'description',
'creation_time', 'last_update', 'upload_code',
'nondefault_settings']
list_display = ('name', 'user', 'last_update', 'creation_time')
list_display_links = ('name',)
class BenchmarkConfigAdmin(admin.ModelAdmin):
list_display = ['name', 'benchmark_type', 'creation_time']
list_filter = ['benchmark_type']
fields = ['application', 'name', 'benchmark_type', 'creation_time',
'isolation', 'scalefactor', 'terminals', 'rate', 'time',
'skew', 'configuration']
class DBConfAdmin(admin.ModelAdmin):
list_display = ['name', 'dbms_info', 'creation_time']
fields = ['application', 'name', 'creation_time',
'configuration', 'orig_config_diffs', 'dbms']
def dbms_info(self, obj):
return obj.dbms.full_name
class DBMSMetricsAdmin(admin.ModelAdmin):
list_display = ['name', 'dbms_info', 'creation_time']
fields = ['application', 'name', 'creation_time',
'execution_time', 'configuration', 'orig_config_diffs', 'dbms']
def dbms_info(self, obj):
return obj.dbms.full_name
class TaskMetaAdmin(admin.ModelAdmin):
# readonly_fields = ('result',)
list_display = ['id', 'status', 'date_done']
class ResultAdmin(admin.ModelAdmin):
list_display = ['result_id', 'dbms_info', 'benchmark', 'creation_time']
list_filter = ['dbms__type', 'dbms__version',
'benchmark_config__benchmark_type']
ordering = ['id']
def result_id(self, obj):
return obj.id
def dbms_info(self, obj):
return obj.dbms.full_name
def benchmark(self, obj):
return obj.benchmark_config.benchmark_type
class ResultDataAdmin(admin.ModelAdmin):
list_display = ['id', 'dbms_info', 'hardware_info']
def dbms_info(self, obj):
return obj.cluster.dbms.full_name
def hardware_info(self, obj):
return obj.cluster.hardware.name
class PipelineResultAdmin(admin.ModelAdmin):
list_display = ['task_type', 'dbms_info',
'hardware_info', 'creation_timestamp']
def dbms_info(self, obj):
return obj.dbms.full_name
def hardware_info(self, obj):
return obj.hardware.name
class StatisticsAdmin(admin.ModelAdmin):
list_display = ['id', 'type', 'time']
list_filter = ['type']
class WorkloadClusterAdmin(admin.ModelAdmin):
list_display = ['cluster_id', 'cluster_name']
def cluster_id(self, obj):
return obj.pk
admin.site.register(DBMSCatalog, DBMSCatalogAdmin)
admin.site.register(KnobCatalog, KnobCatalogAdmin)
admin.site.register(MetricCatalog, MetricCatalogAdmin)
admin.site.register(Application, ApplicationAdmin)
admin.site.register(Project, ProjectAdmin)
admin.site.register(BenchmarkConfig, BenchmarkConfigAdmin)
admin.site.register(DBConf, DBConfAdmin)
admin.site.register(DBMSMetrics, DBMSMetricsAdmin)
admin.site.register(TaskMeta, TaskMetaAdmin)
admin.site.register(Result, ResultAdmin)
admin.site.register(ResultData, ResultDataAdmin)
admin.site.register(PipelineResult, PipelineResultAdmin)
admin.site.register(Statistics, StatisticsAdmin)
admin.site.register(WorkloadCluster, WorkloadClusterAdmin)
| {
"content_hash": "b576971b193c5d201e6297ce5702eefa",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 78,
"avg_line_length": 31.235714285714284,
"alnum_prop": 0.6695632289046421,
"repo_name": "dvanaken/website",
"id": "d5c4f34cca15b726007ba44324b8e03ba7aaf00d",
"size": "4373",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "website/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5964"
},
{
"name": "HTML",
"bytes": "47581"
},
{
"name": "JavaScript",
"bytes": "24394"
},
{
"name": "Python",
"bytes": "171863"
}
],
"symlink_target": ""
} |
from client import SliceMatrix
import core
import connect
| {
"content_hash": "6af95aee9c4dde7209d012ad51b430ac",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 30,
"avg_line_length": 19.333333333333332,
"alnum_prop": 0.8620689655172413,
"repo_name": "tynano/slicematrixIO-python",
"id": "4fcf45d8103caa30e29b49f6ee2146c74789f97e",
"size": "58",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slicematrixIO/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "114176"
}
],
"symlink_target": ""
} |
import cv2
import numpy as np
from plantcv.plantcv import rotate
def test_rotate(test_data):
"""Test for PlantCV."""
img = cv2.imread(test_data.small_rgb_img)
rotated = rotate(img=img, rotation_deg=45, crop=True)
imgavg = np.average(img)
rotateavg = np.average(rotated)
assert rotateavg != imgavg
| {
"content_hash": "46946c94082dffd98624f6dbc99099c1",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 57,
"avg_line_length": 26.916666666666668,
"alnum_prop": 0.6934984520123839,
"repo_name": "danforthcenter/plantcv",
"id": "e4891d3ac85424ad4c9b748cb4113efad6edf359",
"size": "323",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/plantcv/test_rotate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1112"
},
{
"name": "Python",
"bytes": "898011"
},
{
"name": "R",
"bytes": "1327"
},
{
"name": "Shell",
"bytes": "3348"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.utils.translation import ugettext_lazy as _
from sentry import http, options
from sentry.identity.pipeline import IdentityProviderPipeline
from sentry.identity.github import get_user_info
from sentry.integrations import Integration, IntegrationFeatures, IntegrationProvider, IntegrationMetadata
from sentry.integrations.exceptions import ApiError
from sentry.integrations.constants import ERR_INTERNAL, ERR_UNAUTHORIZED
from sentry.integrations.repositories import RepositoryMixin
from sentry.pipeline import NestedPipelineView, PipelineView
from sentry.utils.http import absolute_uri
from .client import GitHubAppsClient
from .issues import GitHubIssueBasic
from .repository import GitHubRepositoryProvider
from .utils import get_jwt
DESCRIPTION = """
Define a relationship between Sentry and GitHub.
* Authorize repositories to be added for syncing commit data.
* Create or link existing GitHub issues. (coming soon)
"""
disable_dialog = {
'actionText': 'Visit GitHub',
'body': 'Before deleting this integration, you must uninstall this integration from GitHub. After uninstalling, your integration will be disabled at which point you can choose to delete this integration.'
}
removal_dialog = {
'actionText': 'Delete',
'body': 'Deleting this integration will delete all associated repositories and commit data. This action cannot be undone. Are you sure you want to delete your integration?'
}
metadata = IntegrationMetadata(
description=DESCRIPTION.strip(),
author='The Sentry Team',
noun=_('Installation'),
issue_url='https://github.com/getsentry/sentry/issues/new?title=GitHub%20Integration:%20&labels=Component%3A%20Integrations',
source_url='https://github.com/getsentry/sentry/tree/master/src/sentry/integrations/github',
aspects={
'disable_dialog': disable_dialog,
'removal_dialog': removal_dialog,
},
)
API_ERRORS = {
404: 'GitHub returned a 404 Not Found error. If this repository exists, ensure'
' that your installation has permission to access this repository'
' (https://github.com/settings/installations).',
401: ERR_UNAUTHORIZED,
}
class GitHubIntegration(Integration, GitHubIssueBasic, RepositoryMixin):
def get_client(self):
return GitHubAppsClient(integration=self.model)
def get_repositories(self):
return self.get_client().get_repositories()
def reinstall(self):
self.reinstall_repositories()
def message_from_error(self, exc):
if isinstance(exc, ApiError):
message = API_ERRORS.get(exc.code)
if message:
return message
return (
'Error Communicating with GitHub (HTTP %s): %s' % (
exc.code, exc.json.get('message', 'unknown error')
if exc.json else 'unknown error',
)
)
else:
return ERR_INTERNAL
class GitHubIntegrationProvider(IntegrationProvider):
key = 'github'
name = 'GitHub'
metadata = metadata
integration_cls = GitHubIntegration
features = frozenset([
IntegrationFeatures.COMMITS,
IntegrationFeatures.ISSUE_BASIC,
])
setup_dialog_config = {
'width': 1030,
'height': 1000,
}
def get_pipeline_views(self):
identity_pipeline_config = {
'oauth_scopes': (),
'redirect_url': absolute_uri('/extensions/github/setup/'),
}
identity_pipeline_view = NestedPipelineView(
bind_key='identity',
provider_key='github',
pipeline_cls=IdentityProviderPipeline,
config=identity_pipeline_config,
)
return [GitHubInstallationRedirect(), identity_pipeline_view]
def get_installation_info(self, access_token, installation_id):
session = http.build_session()
resp = session.get(
'https://api.github.com/app/installations/%s' % installation_id,
headers={
'Authorization': 'Bearer %s' % get_jwt(),
'Accept': 'application/vnd.github.machine-man-preview+json',
}
)
resp.raise_for_status()
installation_resp = resp.json()
resp = session.get(
'https://api.github.com/user/installations',
params={'access_token': access_token},
headers={'Accept': 'application/vnd.github.machine-man-preview+json'}
)
resp.raise_for_status()
user_installations_resp = resp.json()
# verify that user actually has access to the installation
for installation in user_installations_resp['installations']:
if installation['id'] == installation_resp['id']:
return installation_resp
return None
def build_integration(self, state):
identity = state['identity']['data']
user = get_user_info(identity['access_token'])
installation = self.get_installation_info(
identity['access_token'], state['installation_id'])
integration = {
'name': installation['account']['login'],
# TODO(adhiraj): This should be a constant representing the entire github cloud.
'external_id': installation['id'],
# GitHub identity is associated directly to the application, *not*
# to the installation itself.
'idp_external_id': installation['app_id'],
'metadata': {
# The access token will be populated upon API usage
'access_token': None,
'expires_at': None,
'icon': installation['account']['avatar_url'],
'domain_name': installation['account']['html_url'].replace('https://', ''),
'account_type': installation['account']['type'],
},
'user_identity': {
'type': 'github',
'external_id': user['id'],
'scopes': [], # GitHub apps do not have user scopes
'data': {'access_token': identity['access_token']},
},
}
if state.get('reinstall_id'):
integration['reinstall_id'] = state['reinstall_id']
return integration
def setup(self):
from sentry.plugins import bindings
bindings.add(
'integration-repository.provider',
GitHubRepositoryProvider,
id='integrations:github',
)
class GitHubInstallationRedirect(PipelineView):
def get_app_url(self):
name = options.get('github-app.name')
return 'https://github.com/apps/%s' % name
def dispatch(self, request, pipeline):
if 'reinstall_id' in request.GET:
pipeline.bind_state('reinstall_id', request.GET['reinstall_id'])
if 'installation_id' in request.GET:
pipeline.bind_state('installation_id', request.GET['installation_id'])
return pipeline.next_step()
return self.redirect(self.get_app_url())
| {
"content_hash": "7600547767ba7aa8e67b133a26e331b7",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 208,
"avg_line_length": 35.9492385786802,
"alnum_prop": 0.6341428974865857,
"repo_name": "looker/sentry",
"id": "21331a5a4a25f33fac9bcfc563cc22c1850b82a7",
"size": "7082",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/integrations/github/integration.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "289931"
},
{
"name": "HTML",
"bytes": "241322"
},
{
"name": "JavaScript",
"bytes": "3112298"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "7048"
},
{
"name": "Python",
"bytes": "36341504"
},
{
"name": "Ruby",
"bytes": "204"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
} |
"""Tests for network API"""
from nova import context
from nova import network
from nova.openstack.common import rpc
from nova import test
class ApiTestCase(test.TestCase):
def setUp(self):
super(ApiTestCase, self).setUp()
self.network_api = network.API()
self.context = context.RequestContext('fake-user',
'fake-project')
def _do_test_associate_floating_ip(self, orig_instance_uuid):
"""Test post-association logic"""
new_instance = {'uuid': 'new-uuid'}
def fake_rpc_call(context, topic, msg):
return orig_instance_uuid
self.stubs.Set(rpc, 'call', fake_rpc_call)
def fake_instance_get_by_uuid(context, instance_uuid):
return {'uuid': instance_uuid}
self.stubs.Set(self.network_api.db, 'instance_get_by_uuid',
fake_instance_get_by_uuid)
def fake_get_nw_info(ctxt, instance):
class FakeNWInfo(object):
def json(self):
pass
return FakeNWInfo()
self.stubs.Set(self.network_api, '_get_instance_nw_info',
fake_get_nw_info)
if orig_instance_uuid:
expected_updated_instances = [new_instance['uuid'],
orig_instance_uuid]
else:
expected_updated_instances = [new_instance['uuid']]
def fake_instance_info_cache_update(context, instance_uuid, cache):
self.assertEquals(instance_uuid,
expected_updated_instances.pop())
self.stubs.Set(self.network_api.db, 'instance_info_cache_update',
fake_instance_info_cache_update)
self.network_api.associate_floating_ip(self.context,
new_instance,
'172.24.4.225',
'10.0.0.2')
def test_associate_preassociated_floating_ip(self):
self._do_test_associate_floating_ip('orig-uuid')
def test_associate_unassociated_floating_ip(self):
self._do_test_associate_floating_ip(None)
| {
"content_hash": "031c508ee783a474dbb37e6897c043d9",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 75,
"avg_line_length": 35.20634920634921,
"alnum_prop": 0.5495942290351669,
"repo_name": "NewpTone/stacklab-nova",
"id": "a29756caaea4f8ace056a17a33bdf53aed50092d",
"size": "2892",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "debian/python-nova/usr/share/pyshared/nova/tests/network/test_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "62986"
},
{
"name": "Python",
"bytes": "21263831"
},
{
"name": "Shell",
"bytes": "51461"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('jobportal', '0007_auto_20170314_2337'),
]
operations = [
migrations.RemoveField(
model_name='person',
name='join_date',
),
]
| {
"content_hash": "4ce8a04567ee32a39cac4d9bf2d757b8",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 49,
"avg_line_length": 19.11764705882353,
"alnum_prop": 0.5876923076923077,
"repo_name": "klenks/jobsportal",
"id": "bb32713cad3ef761bc5847b3b339d1aab75152e6",
"size": "398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jobportal/migrations/0008_remove_person_join_date.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "138702"
},
{
"name": "HTML",
"bytes": "158529"
},
{
"name": "JavaScript",
"bytes": "250743"
},
{
"name": "Python",
"bytes": "7450092"
},
{
"name": "Shell",
"bytes": "3234"
}
],
"symlink_target": ""
} |
Import('env')
#
# Dump build environment (for debug)
#print env.Dump()
#
flags = " ".join(env['LINKFLAGS'])
flags = flags.replace("-u _printf_float", "")
flags = flags.replace("-u _scanf_float", "")
newflags = flags.split()
env.Replace(
LINKFLAGS=newflags
)
| {
"content_hash": "e965058398902641b2bf17acf20b479c",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 45,
"avg_line_length": 17.466666666666665,
"alnum_prop": 0.6564885496183206,
"repo_name": "Aircoookie/WLED",
"id": "da916ebe2b863128ca2c0c9f22e0960658817a3d",
"size": "262",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "pio-scripts/strip-floats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "330"
},
{
"name": "C",
"bytes": "1071112"
},
{
"name": "C++",
"bytes": "2179871"
},
{
"name": "CSS",
"bytes": "52415"
},
{
"name": "Dockerfile",
"bytes": "1132"
},
{
"name": "HTML",
"bytes": "190100"
},
{
"name": "JavaScript",
"bytes": "132178"
},
{
"name": "Python",
"bytes": "10228"
},
{
"name": "Shell",
"bytes": "359"
}
],
"symlink_target": ""
} |
def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources, imp
__file__ = pkg_resources.resource_filename(__name__, '_unpacker.cp35-win32.pyd')
__loader__ = None; del __bootstrap__, __loader__
imp.load_dynamic(__name__,__file__)
__bootstrap__()
| {
"content_hash": "faf37b082e4cee76a8fb1b0d1e30fcf2",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 84,
"avg_line_length": 42.42857142857143,
"alnum_prop": 0.5925925925925926,
"repo_name": "stevenzhang18/Indeed-Flask",
"id": "a65a01d8e3af7590b30b571683d43b754eb7cc74",
"size": "297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/pandas/msgpack/_unpacker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "45061"
},
{
"name": "HTML",
"bytes": "1386611"
},
{
"name": "JavaScript",
"bytes": "84693"
},
{
"name": "Python",
"bytes": "10498302"
}
],
"symlink_target": ""
} |
class Config(object):
"""Global configuration variables for all queries derived from ``mysqlstmt.Stmt``.
Values set on these class attributes will affect all queries.
Examples: ::
mysqlstmt.Config.placeholder = '%'
"""
placeholder = '?'
"""Parameterize queries using this placeholder.
* None = Per class instance
* String = Placeholder value to use unless instance overrides
* False = Disable parameterized queries
"""
quote_all_values = False
"""Call :py:meth:`mysqlstmt.stmt.Stmt.quote` for non-parameterized string values.
* None = Per class instance
* True = Always
* False = Never
"""
quote_all_col_refs = True
"""Quote all column references with backticks.
* None = Per class instance
* True = Always
* False = Never
"""
select_cacheable = None
"""Whether MySQL should cache SELECT results.
* None = Per class instance
* True = Always
* False = Never
"""
| {
"content_hash": "22c468c9cd115cbd3b8a984f6a2672c9",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 86,
"avg_line_length": 24.195121951219512,
"alnum_prop": 0.6370967741935484,
"repo_name": "lovette/mysqlstmt",
"id": "51720b9914a1c46f14f8d90ae249ec299cef8d2b",
"size": "1018",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysqlstmt/config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "159914"
}
],
"symlink_target": ""
} |
import collections
import itertools
import mimetypes
import time
import math
import random
from hashlib import md5
from swift import gettext_ as _
from urllib import unquote, quote
from greenlet import GreenletExit
from eventlet import GreenPile
from eventlet.queue import Queue
from eventlet.timeout import Timeout
from swift.common.utils import (
clean_content_type, config_true_value, ContextPool, csv_append,
GreenAsyncPile, GreenthreadSafeIterator, json, Timestamp,
normalize_delete_at_timestamp, public, get_expirer_container,
document_iters_to_http_response_body, parse_content_range,
quorum_size, reiterate, close_if_possible)
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_metadata, check_object_creation, \
check_copy_from_header, check_destination_header, \
check_account_format
from swift.common import constraints
from swift.common.exceptions import ChunkReadTimeout, \
ChunkWriteTimeout, ConnectionTimeout, ResponseTimeout, \
InsufficientStorage, FooterNotSupported, MultiphasePUTNotSupported, \
PutterConnectError, ChunkReadError
from swift.common.http import (
is_informational, is_success, is_client_error, is_server_error,
HTTP_CONTINUE, HTTP_CREATED, HTTP_MULTIPLE_CHOICES,
HTTP_INTERNAL_SERVER_ERROR, HTTP_SERVICE_UNAVAILABLE,
HTTP_INSUFFICIENT_STORAGE, HTTP_PRECONDITION_FAILED, HTTP_CONFLICT)
from swift.common.storage_policy import (POLICIES, REPL_POLICY, EC_POLICY,
ECDriverError, PolicyError)
from swift.proxy.controllers.base import Controller, delay_denial, \
cors_validation, ResumingGetter
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \
HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPRequestTimeout, \
HTTPServerError, HTTPServiceUnavailable, Request, HeaderKeyDict, \
HTTPClientDisconnect, HTTPUnprocessableEntity, Response, HTTPException, \
HTTPRequestedRangeNotSatisfiable, Range
from swift.common.request_helpers import is_sys_or_user_meta, is_sys_meta, \
remove_items, copy_header_subset
def copy_headers_into(from_r, to_r):
"""
Will copy desired headers from from_r to to_r
:params from_r: a swob Request or Response
:params to_r: a swob Request or Response
"""
pass_headers = ['x-delete-at']
for k, v in from_r.headers.items():
if is_sys_or_user_meta('object', k) or k.lower() in pass_headers:
to_r.headers[k] = v
def check_content_type(req):
if not req.environ.get('swift.content_type_overridden') and \
';' in req.headers.get('content-type', ''):
for param in req.headers['content-type'].split(';')[1:]:
if param.lstrip().startswith('swift_'):
return HTTPBadRequest("Invalid Content-Type, "
"swift_* is not a valid parameter name.")
return None
class ObjectControllerRouter(object):
policy_type_to_controller_map = {}
@classmethod
def register(cls, policy_type):
"""
        Decorator for Storage Policy implementations to register
their ObjectController implementations.
This also fills in a policy_type attribute on the class.
"""
def register_wrapper(controller_cls):
if policy_type in cls.policy_type_to_controller_map:
raise PolicyError(
'%r is already registered for the policy_type %r' % (
cls.policy_type_to_controller_map[policy_type],
policy_type))
cls.policy_type_to_controller_map[policy_type] = controller_cls
controller_cls.policy_type = policy_type
return controller_cls
return register_wrapper
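    # Usage sketch (added; the concrete controller name here is an assumption,
    # not necessarily the one registered elsewhere in this module):
    #
    #   @ObjectControllerRouter.register(REPL_POLICY)
    #   class ReplicatedObjectController(BaseObjectController):
    #       ...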
def __init__(self):
self.policy_to_controller_cls = {}
for policy in POLICIES:
self.policy_to_controller_cls[policy] = \
self.policy_type_to_controller_map[policy.policy_type]
def __getitem__(self, policy):
return self.policy_to_controller_cls[policy]
class BaseObjectController(Controller):
"""Base WSGI controller for object requests."""
server_type = 'Object'
def __init__(self, app, account_name, container_name, object_name,
**kwargs):
Controller.__init__(self, app)
self.account_name = unquote(account_name)
self.container_name = unquote(container_name)
self.object_name = unquote(object_name)
def iter_nodes_local_first(self, ring, partition):
"""
Yields nodes for a ring partition.
If the 'write_affinity' setting is non-empty, then this will yield N
local nodes (as defined by the write_affinity setting) first, then the
rest of the nodes as normal. It is a re-ordering of the nodes such
that the local ones come first; no node is omitted. The effect is
that the request will be serviced by local object servers first, but
nonlocal ones will be employed if not enough local ones are available.
:param ring: ring to get nodes from
:param partition: ring partition to yield nodes for
"""
is_local = self.app.write_affinity_is_local_fn
if is_local is None:
return self.app.iter_nodes(ring, partition)
primary_nodes = ring.get_part_nodes(partition)
num_locals = self.app.write_affinity_node_count(len(primary_nodes))
all_nodes = itertools.chain(primary_nodes,
ring.get_more_nodes(partition))
first_n_local_nodes = list(itertools.islice(
itertools.ifilter(is_local, all_nodes), num_locals))
# refresh it; it moved when we computed first_n_local_nodes
all_nodes = itertools.chain(primary_nodes,
ring.get_more_nodes(partition))
local_first_node_iter = itertools.chain(
first_n_local_nodes,
itertools.ifilter(lambda node: node not in first_n_local_nodes,
all_nodes))
return self.app.iter_nodes(
ring, partition, node_iter=local_first_node_iter)
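    # Illustrative note (added; region names are made up): with write_affinity
    # matching region r1 and write_affinity_node_count of 2, primaries
    # [n1(r1), n2(r2), n3(r1)] are yielded roughly as n1, n3 first and then
    # the remaining nodes in ring order; nodes are only reordered, never
    # dropped.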
def GETorHEAD(self, req):
"""Handle HTTP GET or HEAD requests."""
container_info = self.container_info(
self.account_name, self.container_name, req)
req.acl = container_info['read_acl']
# pass the policy index to storage nodes via req header
policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
container_info['storage_policy'])
policy = POLICIES.get_by_index(policy_index)
obj_ring = self.app.get_object_ring(policy_index)
req.headers['X-Backend-Storage-Policy-Index'] = policy_index
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
partition = obj_ring.get_part(
self.account_name, self.container_name, self.object_name)
node_iter = self.app.iter_nodes(obj_ring, partition)
resp = self._reroute(policy)._get_or_head_response(
req, node_iter, partition, policy)
if ';' in resp.headers.get('content-type', ''):
resp.content_type = clean_content_type(
resp.headers['content-type'])
return resp
@public
@cors_validation
@delay_denial
def GET(self, req):
"""Handler for HTTP GET requests."""
return self.GETorHEAD(req)
@public
@cors_validation
@delay_denial
def HEAD(self, req):
"""Handler for HTTP HEAD requests."""
return self.GETorHEAD(req)
@public
@cors_validation
@delay_denial
def POST(self, req):
"""HTTP POST request handler."""
if self.app.object_post_as_copy:
req.method = 'PUT'
req.path_info = '/v1/%s/%s/%s' % (
self.account_name, self.container_name, self.object_name)
req.headers['Content-Length'] = 0
req.headers['X-Copy-From'] = quote('/%s/%s' % (self.container_name,
self.object_name))
req.environ['swift.post_as_copy'] = True
req.environ['swift_versioned_copy'] = True
resp = self.PUT(req)
# Older editions returned 202 Accepted on object POSTs, so we'll
# convert any 201 Created responses to that for compatibility with
# picky clients.
if resp.status_int != HTTP_CREATED:
return resp
return HTTPAccepted(request=req)
else:
error_response = check_metadata(req, 'object')
if error_response:
return error_response
container_info = self.container_info(
self.account_name, self.container_name, req)
container_partition = container_info['partition']
containers = container_info['nodes']
req.acl = container_info['write_acl']
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
if not containers:
return HTTPNotFound(request=req)
req, delete_at_container, delete_at_part, \
delete_at_nodes = self._config_obj_expiration(req)
# pass the policy index to storage nodes via req header
policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
container_info['storage_policy'])
obj_ring = self.app.get_object_ring(policy_index)
req.headers['X-Backend-Storage-Policy-Index'] = policy_index
partition, nodes = obj_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
req.headers['X-Timestamp'] = Timestamp(time.time()).internal
headers = self._backend_requests(
req, len(nodes), container_partition, containers,
delete_at_container, delete_at_part, delete_at_nodes)
return self._post_object(req, obj_ring, partition, headers)
def _backend_requests(self, req, n_outgoing,
container_partition, containers,
delete_at_container=None, delete_at_partition=None,
delete_at_nodes=None):
policy_index = req.headers['X-Backend-Storage-Policy-Index']
policy = POLICIES.get_by_index(policy_index)
headers = [self.generate_request_headers(req, additional=req.headers)
for _junk in range(n_outgoing)]
def set_container_update(index, container):
headers[index]['X-Container-Partition'] = container_partition
headers[index]['X-Container-Host'] = csv_append(
headers[index].get('X-Container-Host'),
'%(ip)s:%(port)s' % container)
headers[index]['X-Container-Device'] = csv_append(
headers[index].get('X-Container-Device'),
container['device'])
for i, container in enumerate(containers):
i = i % len(headers)
set_container_update(i, container)
        # If the number of container updates is smaller than the number of
        # object replicas (or fragments), spread the updates across the
        # remaining backend requests, pigeonhole-style, so that at least a
        # quorum of the object-server requests carry a container update.
        # TODO?: apply the same treatment to X-Delete-At-Container?
n_updates_needed = min(policy.quorum + 1, n_outgoing)
container_iter = itertools.cycle(containers)
existing_updates = len(containers)
while existing_updates < n_updates_needed:
set_container_update(existing_updates, next(container_iter))
existing_updates += 1
for i, node in enumerate(delete_at_nodes or []):
i = i % len(headers)
headers[i]['X-Delete-At-Container'] = delete_at_container
headers[i]['X-Delete-At-Partition'] = delete_at_partition
headers[i]['X-Delete-At-Host'] = csv_append(
headers[i].get('X-Delete-At-Host'),
'%(ip)s:%(port)s' % node)
headers[i]['X-Delete-At-Device'] = csv_append(
headers[i].get('X-Delete-At-Device'),
node['device'])
return headers
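    # Worked example (illustrative only): with n_outgoing=3 object requests
    # but just 2 container nodes, the loop above attaches containers[0] to
    # headers[0] and containers[1] to headers[1]. For a 3-replica policy the
    # quorum is 2, so n_updates_needed = min(2 + 1, 3) = 3 and the while
    # loop cycles back to containers[0] for headers[2]; every object server
    # therefore receives a container update even though there are fewer
    # container replicas than object replicas.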
def _await_response(self, conn, **kwargs):
with Timeout(self.app.node_timeout):
if conn.resp:
return conn.resp
else:
return conn.getresponse()
def _get_conn_response(self, conn, req, **kwargs):
try:
resp = self._await_response(conn, **kwargs)
return (conn, resp)
except (Exception, Timeout):
self.app.exception_occurred(
conn.node, _('Object'),
_('Trying to get final status of PUT to %s') % req.path)
return (None, None)
def _get_put_responses(self, req, conns, nodes, **kwargs):
"""
Collect replicated object responses.
"""
statuses = []
reasons = []
bodies = []
etags = set()
pile = GreenAsyncPile(len(conns))
for conn in conns:
pile.spawn(self._get_conn_response, conn, req)
def _handle_response(conn, response):
statuses.append(response.status)
reasons.append(response.reason)
bodies.append(response.read())
if response.status == HTTP_INSUFFICIENT_STORAGE:
self.app.error_limit(conn.node,
_('ERROR Insufficient Storage'))
elif response.status >= HTTP_INTERNAL_SERVER_ERROR:
self.app.error_occurred(
conn.node,
_('ERROR %(status)d %(body)s From Object Server '
're: %(path)s') %
{'status': response.status,
'body': bodies[-1][:1024], 'path': req.path})
elif is_success(response.status):
etags.add(response.getheader('etag').strip('"'))
for (conn, response) in pile:
if response:
_handle_response(conn, response)
if self.have_quorum(statuses, len(nodes)):
break
# give any pending requests *some* chance to finish
finished_quickly = pile.waitall(self.app.post_quorum_timeout)
for (conn, response) in finished_quickly:
if response:
_handle_response(conn, response)
while len(statuses) < len(nodes):
statuses.append(HTTP_SERVICE_UNAVAILABLE)
reasons.append('')
bodies.append('')
return statuses, reasons, bodies, etags
def _config_obj_expiration(self, req):
delete_at_container = None
delete_at_part = None
delete_at_nodes = None
req = constraints.check_delete_headers(req)
if 'x-delete-at' in req.headers:
x_delete_at = int(normalize_delete_at_timestamp(
int(req.headers['x-delete-at'])))
req.environ.setdefault('swift.log_info', []).append(
'x-delete-at:%s' % x_delete_at)
delete_at_container = get_expirer_container(
x_delete_at, self.app.expiring_objects_container_divisor,
self.account_name, self.container_name, self.object_name)
delete_at_part, delete_at_nodes = \
self.app.container_ring.get_nodes(
self.app.expiring_objects_account, delete_at_container)
return req, delete_at_container, delete_at_part, delete_at_nodes
def _handle_copy_request(self, req):
"""
This method handles copying objects based on values set in the headers
'X-Copy-From' and 'X-Copy-From-Account'
This method was added as part of the refactoring of the PUT method and
the functionality is expected to be moved to middleware
"""
if req.environ.get('swift.orig_req_method', req.method) != 'POST':
req.environ.setdefault('swift.log_info', []).append(
'x-copy-from:%s' % req.headers['X-Copy-From'])
ver, acct, _rest = req.split_path(2, 3, True)
src_account_name = req.headers.get('X-Copy-From-Account', None)
if src_account_name:
src_account_name = check_account_format(req, src_account_name)
else:
src_account_name = acct
src_container_name, src_obj_name = check_copy_from_header(req)
source_header = '/%s/%s/%s/%s' % (
ver, src_account_name, src_container_name, src_obj_name)
source_req = req.copy_get()
        # make sure the source request uses its container_info
source_req.headers.pop('X-Backend-Storage-Policy-Index', None)
source_req.path_info = source_header
source_req.headers['X-Newest'] = 'true'
orig_obj_name = self.object_name
orig_container_name = self.container_name
orig_account_name = self.account_name
sink_req = Request.blank(req.path_info,
environ=req.environ, headers=req.headers)
self.object_name = src_obj_name
self.container_name = src_container_name
self.account_name = src_account_name
source_resp = self.GET(source_req)
# This gives middlewares a way to change the source; for example,
# this lets you COPY a SLO manifest and have the new object be the
# concatenation of the segments (like what a GET request gives
# the client), not a copy of the manifest file.
hook = req.environ.get(
'swift.copy_hook',
(lambda source_req, source_resp, sink_req: source_resp))
source_resp = hook(source_req, source_resp, sink_req)
# reset names
self.object_name = orig_obj_name
self.container_name = orig_container_name
self.account_name = orig_account_name
if source_resp.status_int >= HTTP_MULTIPLE_CHOICES:
# this is a bit of ugly code, but I'm willing to live with it
# until copy request handling moves to middleware
return source_resp, None, None, None
if source_resp.content_length is None:
# This indicates a transfer-encoding: chunked source object,
# which currently only happens because there are more than
# CONTAINER_LISTING_LIMIT segments in a segmented object. In
# this case, we're going to refuse to do the server-side copy.
raise HTTPRequestEntityTooLarge(request=req)
if source_resp.content_length > constraints.MAX_FILE_SIZE:
raise HTTPRequestEntityTooLarge(request=req)
data_source = iter(source_resp.app_iter)
sink_req.content_length = source_resp.content_length
sink_req.etag = source_resp.etag
# we no longer need the X-Copy-From header
del sink_req.headers['X-Copy-From']
if 'X-Copy-From-Account' in sink_req.headers:
del sink_req.headers['X-Copy-From-Account']
if not req.content_type_manually_set:
sink_req.headers['Content-Type'] = \
source_resp.headers['Content-Type']
fresh_meta_flag = config_true_value(
sink_req.headers.get('x-fresh-metadata', 'false'))
if fresh_meta_flag or 'swift.post_as_copy' in sink_req.environ:
# post-as-copy: ignore new sysmeta, copy existing sysmeta
condition = lambda k: is_sys_meta('object', k)
remove_items(sink_req.headers, condition)
copy_header_subset(source_resp, sink_req, condition)
else:
# copy/update existing sysmeta and user meta
copy_headers_into(source_resp, sink_req)
copy_headers_into(req, sink_req)
# copy over x-static-large-object for POSTs and manifest copies
if 'X-Static-Large-Object' in source_resp.headers and \
(req.params.get('multipart-manifest') == 'get' or
'swift.post_as_copy' in req.environ):
sink_req.headers['X-Static-Large-Object'] = \
source_resp.headers['X-Static-Large-Object']
req = sink_req
def update_response(req, resp):
acct, path = source_resp.environ['PATH_INFO'].split('/', 3)[2:4]
resp.headers['X-Copied-From-Account'] = quote(acct)
resp.headers['X-Copied-From'] = quote(path)
if 'last-modified' in source_resp.headers:
resp.headers['X-Copied-From-Last-Modified'] = \
source_resp.headers['last-modified']
copy_headers_into(req, resp)
return resp
# this is a bit of ugly code, but I'm willing to live with it
# until copy request handling moves to middleware
return None, req, data_source, update_response
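    # Rough sketch of the copy flow above (hypothetical names, shown only
    # for orientation): a client PUT to /v1/AUTH_a/c2/o2 carrying
    # "X-Copy-From: /c1/o1" triggers an internal GET of /v1/AUTH_a/c1/o1
    # with X-Newest: true; the GET body and selected headers are streamed
    # into the PUT of /v1/AUTH_a/c2/o2, and update_response() later adds the
    # X-Copied-From and X-Copied-From-Account headers to the client reply.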
def _update_content_type(self, req):
# Sometimes the 'content-type' header exists, but is set to None.
req.content_type_manually_set = True
detect_content_type = \
config_true_value(req.headers.get('x-detect-content-type'))
if detect_content_type or not req.headers.get('content-type'):
guessed_type, _junk = mimetypes.guess_type(req.path_info)
req.headers['Content-Type'] = guessed_type or \
'application/octet-stream'
if detect_content_type:
req.headers.pop('x-detect-content-type')
else:
req.content_type_manually_set = False
def _update_x_timestamp(self, req):
# Used by container sync feature
if 'x-timestamp' in req.headers:
try:
req_timestamp = Timestamp(req.headers['X-Timestamp'])
except ValueError:
raise HTTPBadRequest(
request=req, content_type='text/plain',
body='X-Timestamp should be a UNIX timestamp float value; '
'was %r' % req.headers['x-timestamp'])
req.headers['X-Timestamp'] = req_timestamp.internal
else:
req.headers['X-Timestamp'] = Timestamp(time.time()).internal
return None
def _check_failure_put_connections(self, conns, req, nodes, min_conns):
"""
Identify any failed connections and check minimum connection count.
"""
if req.if_none_match is not None and '*' in req.if_none_match:
statuses = [conn.resp.status for conn in conns if conn.resp]
if HTTP_PRECONDITION_FAILED in statuses:
# If we find any copy of the file, it shouldn't be uploaded
self.app.logger.debug(
_('Object PUT returning 412, %(statuses)r'),
{'statuses': statuses})
raise HTTPPreconditionFailed(request=req)
if any(conn for conn in conns if conn.resp and
conn.resp.status == HTTP_CONFLICT):
status_times = ['%(status)s (%(timestamp)s)' % {
'status': conn.resp.status,
'timestamp': HeaderKeyDict(
conn.resp.getheaders()).get(
'X-Backend-Timestamp', 'unknown')
} for conn in conns if conn.resp]
self.app.logger.debug(
_('Object PUT returning 202 for 409: '
'%(req_timestamp)s <= %(timestamps)r'),
{'req_timestamp': req.timestamp.internal,
'timestamps': ', '.join(status_times)})
raise HTTPAccepted(request=req)
self._check_min_conn(req, conns, min_conns)
def _connect_put_node(self, nodes, part, path, headers,
logger_thread_locals):
"""
Make connection to storage nodes
Connects to the first working node that it finds in nodes iter
and sends over the request headers. Returns an HTTPConnection
object to handle the rest of the streaming.
This method must be implemented by each policy ObjectController.
:param nodes: an iterator of the target storage nodes
        :param part: ring partition number
:param path: the object path to send to the storage node
:param headers: request headers
:param logger_thread_locals: The thread local values to be set on the
self.app.logger to retain transaction
logging information.
:return: HTTPConnection object
"""
raise NotImplementedError()
def _get_put_connections(self, req, nodes, partition, outgoing_headers,
policy, expect):
"""
Establish connections to storage nodes for PUT request
"""
obj_ring = policy.object_ring
node_iter = GreenthreadSafeIterator(
self.iter_nodes_local_first(obj_ring, partition))
pile = GreenPile(len(nodes))
for nheaders in outgoing_headers:
if expect:
nheaders['Expect'] = '100-continue'
pile.spawn(self._connect_put_node, node_iter, partition,
req.swift_entity_path, nheaders,
self.app.logger.thread_locals)
conns = [conn for conn in pile if conn]
return conns
def _check_min_conn(self, req, conns, min_conns, msg=None):
msg = msg or 'Object PUT returning 503, %(conns)s/%(nodes)s ' \
'required connections'
if len(conns) < min_conns:
self.app.logger.error((msg),
{'conns': len(conns), 'nodes': min_conns})
raise HTTPServiceUnavailable(request=req)
def _store_object(self, req, data_source, nodes, partition,
outgoing_headers):
"""
This method is responsible for establishing connection
with storage nodes and sending the data to each one of those
nodes. The process of transferring data is specific to each
Storage Policy, thus it is required for each policy specific
ObjectController to provide their own implementation of this method.
:param req: the PUT Request
:param data_source: an iterator of the source of the data
:param nodes: an iterator of the target storage nodes
:param partition: ring partition number
:param outgoing_headers: system headers to storage nodes
:return: Response object
"""
raise NotImplementedError()
def _delete_object(self, req, obj_ring, partition, headers):
"""
send object DELETE request to storage nodes. Subclasses of
the BaseObjectController can provide their own implementation
of this method.
:param req: the DELETE Request
:param obj_ring: the object ring
:param partition: ring partition number
:param headers: system headers to storage nodes
:return: Response object
"""
# When deleting objects treat a 404 status as 204.
status_overrides = {404: 204}
resp = self.make_requests(req, obj_ring,
partition, 'DELETE', req.swift_entity_path,
headers, overrides=status_overrides)
return resp
def _post_object(self, req, obj_ring, partition, headers):
"""
send object POST request to storage nodes.
:param req: the POST Request
:param obj_ring: the object ring
:param partition: ring partition number
:param headers: system headers to storage nodes
:return: Response object
"""
resp = self.make_requests(req, obj_ring, partition,
'POST', req.swift_entity_path, headers)
return resp
@public
@cors_validation
@delay_denial
def PUT(self, req):
"""HTTP PUT request handler."""
fw = open("/tmp/log/swift/complete_log","a")
start_time = time.time()
fw.write('Started Proxy PUT at ' + str(start_time) + "\n")
if req.if_none_match is not None and '*' not in req.if_none_match:
# Sending an etag with if-none-match isn't currently supported
return HTTPBadRequest(request=req, content_type='text/plain',
body='If-None-Match only supports *')
container_info = self.container_info(
self.account_name, self.container_name, req)
policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
container_info['storage_policy'])
time_a = time.time()
fw.write('Entering get_object_ring at ' + str(time_a) + "\n")
obj_ring = self.app.get_object_ring(policy_index)
time_a_end = time.time()
        fw.write('Returned from get_object_ring at %s with time taken as '
                 '%s\n' % (time_a_end, (time_a_end - time_a) * 1000))
container_nodes = container_info['nodes']
container_partition = container_info['partition']
time_b = time.time()
fw.write('Entering get_nodes at ' + str(time_b) + "\n")
partition, nodes = obj_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
time_b_end = time.time()
        fw.write('Returned from get_nodes at %s with time taken as %s\n'
                 % (time_b_end, (time_b_end - time_b) * 1000))
# pass the policy index to storage nodes via req header
req.headers['X-Backend-Storage-Policy-Index'] = policy_index
req.acl = container_info['write_acl']
req.environ['swift_sync_key'] = container_info['sync_key']
# is request authorized
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
if not container_info['nodes']:
return HTTPNotFound(request=req)
# update content type in case it is missing
self._update_content_type(req)
# check constraints on object name and request headers
time_c = time.time()
fw.write('Entering check_object_creation at ' + str(time_c) + "\n")
error_response = check_object_creation(req, self.object_name) or \
check_content_type(req)
time_c_end = time.time()
        fw.write('Returned from check_object_creation at %s with time taken '
                 'as %s\n' % (time_c_end, (time_c_end - time_c) * 1000))
if error_response:
return error_response
self._update_x_timestamp(req)
# check if request is a COPY of an existing object
source_header = req.headers.get('X-Copy-From')
if source_header:
error_response, req, data_source, update_response = \
self._handle_copy_request(req)
if error_response:
return error_response
else:
def reader():
try:
return req.environ['wsgi.input'].read(
self.app.client_chunk_size)
except (ValueError, IOError) as e:
raise ChunkReadError(str(e))
data_source = iter(reader, '')
update_response = lambda req, resp: resp
# check if object is set to be automatically deleted (i.e. expired)
time_d = time.time()
fw.write('Entering _config_obj_expiration at ' + str(time_d) + "\n")
req, delete_at_container, delete_at_part, \
delete_at_nodes = self._config_obj_expiration(req)
time_d_end = time.time()
        fw.write('Returned from _config_obj_expiration at %s with time taken'
                 ' as %s\n' % (time_d_end, (time_d_end - time_d) * 1000))
# add special headers to be handled by storage nodes
time_e = time.time()
fw.write('Entering _backend_requests at ' + str(time_e) + "\n")
outgoing_headers = self._backend_requests(
req, len(nodes), container_partition, container_nodes,
delete_at_container, delete_at_part, delete_at_nodes)
time_e_end = time.time()
        fw.write('Returned from _backend_requests at %s with time taken as '
                 '%s\n' % (time_e_end, (time_e_end - time_e) * 1000))
# send object to storage nodes
time_f = time.time()
fw.write('Entering _store_object at ' + str(time_f) + "\n")
resp = self._store_object(
req, data_source, nodes, partition, outgoing_headers)
time_f_end = time.time()
        fw.write('Returned from _store_object at %s with time taken as %s\n'
                 % (time_f_end, (time_f_end - time_f) * 1000))
end_time = time.time()
        fw.write('Leaving Proxy PUT at %s with time taken as %s\n'
                 % (end_time, (end_time - start_time) * 1000))
fw.close()
return update_response(req, resp)
@public
@cors_validation
@delay_denial
def DELETE(self, req):
"""HTTP DELETE request handler."""
container_info = self.container_info(
self.account_name, self.container_name, req)
# pass the policy index to storage nodes via req header
policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
container_info['storage_policy'])
obj_ring = self.app.get_object_ring(policy_index)
# pass the policy index to storage nodes via req header
req.headers['X-Backend-Storage-Policy-Index'] = policy_index
container_partition = container_info['partition']
containers = container_info['nodes']
req.acl = container_info['write_acl']
req.environ['swift_sync_key'] = container_info['sync_key']
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
if not containers:
return HTTPNotFound(request=req)
partition, nodes = obj_ring.get_nodes(
self.account_name, self.container_name, self.object_name)
# Used by container sync feature
if 'x-timestamp' in req.headers:
try:
req_timestamp = Timestamp(req.headers['X-Timestamp'])
except ValueError:
return HTTPBadRequest(
request=req, content_type='text/plain',
body='X-Timestamp should be a UNIX timestamp float value; '
'was %r' % req.headers['x-timestamp'])
req.headers['X-Timestamp'] = req_timestamp.internal
else:
req.headers['X-Timestamp'] = Timestamp(time.time()).internal
headers = self._backend_requests(
req, len(nodes), container_partition, containers)
return self._delete_object(req, obj_ring, partition, headers)
def _reroute(self, policy):
"""
For COPY requests we need to make sure the controller instance the
request is routed through is the correct type for the policy.
"""
if not policy:
raise HTTPServiceUnavailable('Unknown Storage Policy')
if policy.policy_type != self.policy_type:
controller = self.app.obj_controller_router[policy](
self.app, self.account_name, self.container_name,
self.object_name)
else:
controller = self
return controller
@public
@cors_validation
@delay_denial
def COPY(self, req):
"""HTTP COPY request handler."""
if not req.headers.get('Destination'):
return HTTPPreconditionFailed(request=req,
body='Destination header required')
dest_account = self.account_name
if 'Destination-Account' in req.headers:
dest_account = req.headers.get('Destination-Account')
dest_account = check_account_format(req, dest_account)
req.headers['X-Copy-From-Account'] = self.account_name
self.account_name = dest_account
del req.headers['Destination-Account']
dest_container, dest_object = check_destination_header(req)
source = '/%s/%s' % (self.container_name, self.object_name)
self.container_name = dest_container
self.object_name = dest_object
# re-write the existing request as a PUT instead of creating a new one
# since this one is already attached to the posthooklogger
        # TODO: Swift now has proxy-logging middleware instead of the
        # posthooklogger used before, i.e. we don't have to keep code that
        # depends on the eventlet.posthooks sequence, IMHO. However, creating
        # a new sub-request might hide some bugs behind the request, so we
        # should discuss which approach (new sub-request vs. re-writing the
        # existing request) is suitable for Swift. [kota_]
req.method = 'PUT'
req.path_info = '/v1/%s/%s/%s' % \
(dest_account, dest_container, dest_object)
req.headers['Content-Length'] = 0
req.headers['X-Copy-From'] = quote(source)
del req.headers['Destination']
container_info = self.container_info(
dest_account, dest_container, req)
dest_policy = POLICIES.get_by_index(container_info['storage_policy'])
return self._reroute(dest_policy).PUT(req)
@ObjectControllerRouter.register(REPL_POLICY)
class ReplicatedObjectController(BaseObjectController):
def _get_or_head_response(self, req, node_iter, partition, policy):
resp = self.GETorHEAD_base(
req, _('Object'), node_iter, partition,
req.swift_entity_path)
return resp
def _connect_put_node(self, nodes, part, path, headers,
logger_thread_locals):
"""
Make a connection for a replicated object.
Connects to the first working node that it finds in node_iter
and sends over the request headers. Returns an HTTPConnection
object to handle the rest of the streaming.
"""
self.app.logger.thread_locals = logger_thread_locals
for node in nodes:
try:
start_time = time.time()
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(
node['ip'], node['port'], node['device'], part, 'PUT',
path, headers)
self.app.set_node_timing(node, time.time() - start_time)
with Timeout(self.app.node_timeout):
resp = conn.getexpect()
if resp.status == HTTP_CONTINUE:
conn.resp = None
conn.node = node
return conn
elif is_success(resp.status) or resp.status == HTTP_CONFLICT:
conn.resp = resp
conn.node = node
return conn
elif headers['If-None-Match'] is not None and \
resp.status == HTTP_PRECONDITION_FAILED:
conn.resp = resp
conn.node = node
return conn
elif resp.status == HTTP_INSUFFICIENT_STORAGE:
self.app.error_limit(node, _('ERROR Insufficient Storage'))
elif is_server_error(resp.status):
self.app.error_occurred(
node,
_('ERROR %(status)d Expect: 100-continue '
'From Object Server') % {
'status': resp.status})
except (Exception, Timeout):
self.app.exception_occurred(
node, _('Object'),
_('Expect: 100-continue on %s') % path)
def _send_file(self, conn, path):
"""Method for a file PUT coro"""
while True:
chunk = conn.queue.get()
if not conn.failed:
try:
with ChunkWriteTimeout(self.app.node_timeout):
conn.send(chunk)
except (Exception, ChunkWriteTimeout):
conn.failed = True
self.app.exception_occurred(
conn.node, _('Object'),
_('Trying to write to %s') % path)
conn.queue.task_done()
def _transfer_data(self, req, data_source, conns, nodes):
"""
Transfer data for a replicated object.
This method was added in the PUT method extraction change
"""
min_conns = quorum_size(len(nodes))
bytes_transferred = 0
try:
with ContextPool(len(nodes)) as pool:
for conn in conns:
conn.failed = False
conn.queue = Queue(self.app.put_queue_depth)
pool.spawn(self._send_file, conn, req.path)
while True:
with ChunkReadTimeout(self.app.client_timeout):
try:
chunk = next(data_source)
except StopIteration:
if req.is_chunked:
for conn in conns:
conn.queue.put('0\r\n\r\n')
break
bytes_transferred += len(chunk)
if bytes_transferred > constraints.MAX_FILE_SIZE:
raise HTTPRequestEntityTooLarge(request=req)
for conn in list(conns):
if not conn.failed:
conn.queue.put(
'%x\r\n%s\r\n' % (len(chunk), chunk)
if req.is_chunked else chunk)
else:
conn.close()
conns.remove(conn)
self._check_min_conn(
req, conns, min_conns,
msg='Object PUT exceptions during'
' send, %(conns)s/%(nodes)s required connections')
for conn in conns:
if conn.queue.unfinished_tasks:
conn.queue.join()
conns = [conn for conn in conns if not conn.failed]
self._check_min_conn(
req, conns, min_conns,
msg='Object PUT exceptions after last send, '
'%(conns)s/%(nodes)s required connections')
except ChunkReadTimeout as err:
self.app.logger.warn(
_('ERROR Client read timeout (%ss)'), err.seconds)
self.app.logger.increment('client_timeouts')
raise HTTPRequestTimeout(request=req)
except HTTPException:
raise
except ChunkReadError:
req.client_disconnect = True
self.app.logger.warn(
_('Client disconnected without sending last chunk'))
self.app.logger.increment('client_disconnects')
raise HTTPClientDisconnect(request=req)
except (Exception, Timeout):
self.app.logger.exception(
_('ERROR Exception causing client disconnect'))
raise HTTPClientDisconnect(request=req)
if req.content_length and bytes_transferred < req.content_length:
req.client_disconnect = True
self.app.logger.warn(
_('Client disconnected without sending enough data'))
self.app.logger.increment('client_disconnects')
raise HTTPClientDisconnect(request=req)
def _store_object(self, req, data_source, nodes, partition,
outgoing_headers):
"""
Store a replicated object.
This method is responsible for establishing connection
with storage nodes and sending object to each one of those
nodes. After sending the data, the "best" response will be
returned based on statuses from all connections
"""
fw = open("/tmp/log/swift/complete_log","a")
start_time = time.time()
fw.write("\nStarted store_object at " + str(start_time) + "\n")
policy_index = req.headers.get('X-Backend-Storage-Policy-Index')
policy = POLICIES.get_by_index(policy_index)
if not nodes:
return HTTPNotFound()
# RFC2616:8.2.3 disallows 100-continue without a body
if (req.content_length > 0) or req.is_chunked:
expect = True
else:
expect = False
time_a = time.time()
fw.write('Entering _get_put_connections at ' + str(time_a) + "\n")
conns = self._get_put_connections(req, nodes, partition,
outgoing_headers, policy, expect)
time_a_end = time.time()
        fw.write('Returned from _get_put_connections at %s with time taken '
                 'as %s\n' % (time_a_end, (time_a_end - time_a) * 1000))
min_conns = quorum_size(len(nodes))
try:
# check that a minimum number of connections were established and
# meet all the correct conditions set in the request
self._check_failure_put_connections(conns, req, nodes, min_conns)
# transfer data
time_b = time.time()
fw.write('Entering _transfer_data at ' + str(time_b) + "\n")
self._transfer_data(req, data_source, conns, nodes)
time_b_end = time.time()
            fw.write('Returned from _transfer_data at %s with time taken as '
                     '%s\n' % (time_b_end, (time_b_end - time_b) * 1000))
# get responses
time_c = time.time()
fw.write('Entering _get_put_responses at ' + str(time_c) + "\n")
statuses, reasons, bodies, etags = self._get_put_responses(
req, conns, nodes)
time_c_end = time.time()
            fw.write('Returned from _get_put_responses at %s with time taken'
                     ' as %s\n' % (time_c_end, (time_c_end - time_c) * 1000))
except HTTPException as resp:
return resp
finally:
for conn in conns:
conn.close()
if len(etags) > 1:
self.app.logger.error(
_('Object servers returned %s mismatched etags'), len(etags))
return HTTPServerError(request=req)
etag = etags.pop() if len(etags) else None
resp = self.best_response(req, statuses, reasons, bodies,
_('Object PUT'), etag=etag)
resp.last_modified = math.ceil(
float(Timestamp(req.headers['X-Timestamp'])))
end_time = time.time()
fw.write("Returning from _store_object at " + str(end_time) + " with time taken as " + str((end_time - start_time)*1000) + "\n")
fw.close()
return resp
class ECAppIter(object):
"""
WSGI iterable that decodes EC fragment archives (or portions thereof)
into the original object (or portions thereof).
:param path: object's path, sans v1 (e.g. /a/c/o)
:param policy: storage policy for this object
:param internal_parts_iters: list of the response-document-parts
iterators for the backend GET responses. For an M+K erasure code,
the caller must supply M such iterables.
:param range_specs: list of dictionaries describing the ranges requested
by the client. Each dictionary contains the start and end of the
client's requested byte range as well as the start and end of the EC
segments containing that byte range.
:param fa_length: length of the fragment archive, in bytes, if the
response is a 200. If it's a 206, then this is ignored.
:param obj_length: length of the object, in bytes. Learned from the
headers in the GET response from the object server.
:param logger: a logger
"""
def __init__(self, path, policy, internal_parts_iters, range_specs,
fa_length, obj_length, logger):
self.path = path
self.policy = policy
self.internal_parts_iters = internal_parts_iters
self.range_specs = range_specs
self.fa_length = fa_length
self.obj_length = obj_length if obj_length is not None else 0
self.boundary = ''
self.logger = logger
self.mime_boundary = None
self.learned_content_type = None
self.stashed_iter = None
def close(self):
for it in self.internal_parts_iters:
close_if_possible(it)
def kickoff(self, req, resp):
"""
Start pulling data from the backends so that we can learn things like
the real Content-Type that might only be in the multipart/byteranges
response body. Update our response accordingly.
Also, this is the first point at which we can learn the MIME
boundary that our response has in the headers. We grab that so we
can also use it in the body.
:returns: None
:raises: HTTPException on error
"""
self.mime_boundary = resp.boundary
self.stashed_iter = reiterate(self._real_iter(req, resp.headers))
if self.learned_content_type is not None:
resp.content_type = self.learned_content_type
resp.content_length = self.obj_length
def _next_range(self):
# Each FA part should have approximately the same headers. We really
# only care about Content-Range and Content-Type, and that'll be the
# same for all the different FAs.
frag_iters = []
headers = None
for parts_iter in self.internal_parts_iters:
part_info = next(parts_iter)
frag_iters.append(part_info['part_iter'])
headers = part_info['headers']
headers = HeaderKeyDict(headers)
return headers, frag_iters
def _actual_range(self, req_start, req_end, entity_length):
try:
rng = Range("bytes=%s-%s" % (
req_start if req_start is not None else '',
req_end if req_end is not None else ''))
except ValueError:
return (None, None)
rfl = rng.ranges_for_length(entity_length)
if not rfl:
return (None, None)
else:
# ranges_for_length() adds 1 to the last byte's position
# because webob once made a mistake
return (rfl[0][0], rfl[0][1] - 1)
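    # Worked example for _actual_range() (illustrative numbers):
    # _actual_range(4, 10, 8) builds Range("bytes=4-10");
    # ranges_for_length(8) clamps it to the half-open pair (4, 8), so the
    # method returns the inclusive (4, 7). A suffix request such as
    # _actual_range(None, 3, 10) becomes "bytes=-3" and returns (7, 9), the
    # last three bytes of a 10-byte entity.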
def _fill_out_range_specs_from_obj_length(self, range_specs):
# Add a few fields to each range spec:
#
# * resp_client_start, resp_client_end: the actual bytes that will
# be delivered to the client for the requested range. This may
# differ from the requested bytes if, say, the requested range
# overlaps the end of the object.
#
# * resp_segment_start, resp_segment_end: the actual offsets of the
# segments that will be decoded for the requested range. These
# differ from resp_client_start/end in that these are aligned
# to segment boundaries, while resp_client_start/end are not
# necessarily so.
#
# * satisfiable: a boolean indicating whether the range is
# satisfiable or not (i.e. the requested range overlaps the
# object in at least one byte).
#
# This is kept separate from _fill_out_range_specs_from_fa_length()
# because this computation can be done with just the response
# headers from the object servers (in particular
# X-Object-Sysmeta-Ec-Content-Length), while the computation in
# _fill_out_range_specs_from_fa_length() requires the beginnings of
# the response bodies.
for spec in range_specs:
cstart, cend = self._actual_range(
spec['req_client_start'],
spec['req_client_end'],
self.obj_length)
spec['resp_client_start'] = cstart
spec['resp_client_end'] = cend
spec['satisfiable'] = (cstart is not None and cend is not None)
sstart, send = self._actual_range(
spec['req_segment_start'],
spec['req_segment_end'],
self.obj_length)
seg_size = self.policy.ec_segment_size
if spec['req_segment_start'] is None and sstart % seg_size != 0:
# Segment start may, in the case of a suffix request, need
# to be rounded up (not down!) to the nearest segment boundary.
# This reflects the trimming of leading garbage (partial
# fragments) from the retrieved fragments.
sstart += seg_size - (sstart % seg_size)
spec['resp_segment_start'] = sstart
spec['resp_segment_end'] = send
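    # Illustrative numbers for the suffix rounding above (assumed values):
    # with a 1024-byte ec_segment_size and a 2500-byte object, a client
    # suffix request for the last 100 bytes arrives here as
    # req_segment_start=None, req_segment_end=2048. _actual_range() turns
    # that into sstart=452, which is not segment aligned, so it is rounded
    # up to 1024; the leading partial-segment bytes are the garbage trimmed
    # from the decoded data.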
def _fill_out_range_specs_from_fa_length(self, fa_length, range_specs):
# Add two fields to each range spec:
#
# * resp_fragment_start, resp_fragment_end: the start and end of
# the fragments that compose this byterange. These values are
# aligned to fragment boundaries.
#
# This way, ECAppIter has the knowledge it needs to correlate
# response byteranges with requested ones for when some byteranges
# are omitted from the response entirely and also to put the right
# Content-Range headers in a multipart/byteranges response.
for spec in range_specs:
fstart, fend = self._actual_range(
spec['req_fragment_start'],
spec['req_fragment_end'],
fa_length)
spec['resp_fragment_start'] = fstart
spec['resp_fragment_end'] = fend
def __iter__(self):
if self.stashed_iter is not None:
return iter(self.stashed_iter)
else:
raise ValueError("Failed to call kickoff() before __iter__()")
def _real_iter(self, req, resp_headers):
if not self.range_specs:
client_asked_for_range = False
range_specs = [{
'req_client_start': 0,
'req_client_end': (None if self.obj_length is None
else self.obj_length - 1),
'resp_client_start': 0,
'resp_client_end': (None if self.obj_length is None
else self.obj_length - 1),
'req_segment_start': 0,
'req_segment_end': (None if self.obj_length is None
else self.obj_length - 1),
'resp_segment_start': 0,
'resp_segment_end': (None if self.obj_length is None
else self.obj_length - 1),
'req_fragment_start': 0,
'req_fragment_end': self.fa_length - 1,
'resp_fragment_start': 0,
'resp_fragment_end': self.fa_length - 1,
'satisfiable': self.obj_length > 0,
}]
else:
client_asked_for_range = True
range_specs = self.range_specs
self._fill_out_range_specs_from_obj_length(range_specs)
multipart = (len([rs for rs in range_specs if rs['satisfiable']]) > 1)
# Multipart responses are not required to be in the same order as
# the Range header; the parts may be in any order the server wants.
# Further, if multiple ranges are requested and only some are
# satisfiable, then only the satisfiable ones appear in the response
# at all. Thus, we cannot simply iterate over range_specs in order;
# we must use the Content-Range header from each part to figure out
# what we've been given.
#
# We do, however, make the assumption that all the object-server
# responses have their ranges in the same order. Otherwise, a
# streaming decode would be impossible.
def convert_ranges_iter():
seen_first_headers = False
ranges_for_resp = {}
while True:
# this'll raise StopIteration and exit the loop
next_range = self._next_range()
headers, frag_iters = next_range
content_type = headers['Content-Type']
content_range = headers.get('Content-Range')
if content_range is not None:
fa_start, fa_end, fa_length = parse_content_range(
content_range)
elif self.fa_length <= 0:
fa_start = None
fa_end = None
fa_length = 0
else:
fa_start = 0
fa_end = self.fa_length - 1
fa_length = self.fa_length
if not seen_first_headers:
# This is the earliest we can possibly do this. On a
# 200 or 206-single-byterange response, we can learn
# the FA's length from the HTTP response headers.
# However, on a 206-multiple-byteranges response, we
# don't learn it until the first part of the
# response body, in the headers of the first MIME
# part.
#
# Similarly, the content type of a
# 206-multiple-byteranges response is
# "multipart/byteranges", not the object's actual
# content type.
self._fill_out_range_specs_from_fa_length(
fa_length, range_specs)
satisfiable = False
for range_spec in range_specs:
satisfiable |= range_spec['satisfiable']
key = (range_spec['resp_fragment_start'],
range_spec['resp_fragment_end'])
ranges_for_resp.setdefault(key, []).append(range_spec)
# The client may have asked for an unsatisfiable set of
# ranges, but when converted to fragments, the object
# servers see it as satisfiable. For example, imagine a
# request for bytes 800-900 of a 750-byte object with a
# 1024-byte segment size. The object servers will see a
# request for bytes 0-${fragsize-1}, and that's
# satisfiable, so they return 206. It's not until we
# learn the object size that we can check for this
# condition.
#
# Note that some unsatisfiable ranges *will* be caught
# by the object servers, like bytes 1800-1900 of a
# 100-byte object with 1024-byte segments. That's not
# what we're dealing with here, though.
if client_asked_for_range and not satisfiable:
req.environ[
'swift.non_client_disconnect'] = True
raise HTTPRequestedRangeNotSatisfiable(
request=req, headers=resp_headers)
self.learned_content_type = content_type
seen_first_headers = True
range_spec = ranges_for_resp[(fa_start, fa_end)].pop(0)
seg_iter = self._decode_segments_from_fragments(frag_iters)
if not range_spec['satisfiable']:
# This'll be small; just a single small segment. Discard
# it.
for x in seg_iter:
pass
continue
byterange_iter = self._iter_one_range(range_spec, seg_iter)
converted = {
"start_byte": range_spec["resp_client_start"],
"end_byte": range_spec["resp_client_end"],
"content_type": content_type,
"part_iter": byterange_iter}
if self.obj_length is not None:
converted["entity_length"] = self.obj_length
yield converted
return document_iters_to_http_response_body(
convert_ranges_iter(), self.mime_boundary, multipart, self.logger)
def _iter_one_range(self, range_spec, segment_iter):
client_start = range_spec['resp_client_start']
client_end = range_spec['resp_client_end']
segment_start = range_spec['resp_segment_start']
segment_end = range_spec['resp_segment_end']
# It's entirely possible that the client asked for a range that
# includes some bytes we have and some we don't; for example, a
# range of bytes 1000-20000000 on a 1500-byte object.
segment_end = (min(segment_end, self.obj_length - 1)
if segment_end is not None
else self.obj_length - 1)
client_end = (min(client_end, self.obj_length - 1)
if client_end is not None
else self.obj_length - 1)
num_segments = int(
math.ceil(float(segment_end + 1 - segment_start)
/ self.policy.ec_segment_size))
# We get full segments here, but the client may have requested a
# byte range that begins or ends in the middle of a segment.
# Thus, we have some amount of overrun (extra decoded bytes)
# that we trim off so the client gets exactly what they
# requested.
start_overrun = client_start - segment_start
end_overrun = segment_end - client_end
for i, next_seg in enumerate(segment_iter):
# We may have a start_overrun of more than one segment in
# the case of suffix-byte-range requests. However, we never
# have an end_overrun of more than one segment.
if start_overrun > 0:
seglen = len(next_seg)
if seglen <= start_overrun:
start_overrun -= seglen
continue
else:
next_seg = next_seg[start_overrun:]
start_overrun = 0
if i == (num_segments - 1) and end_overrun:
next_seg = next_seg[:-end_overrun]
yield next_seg
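    # Example of the overrun trimming above (illustrative figures): with a
    # 1024-byte segment size, a client range of bytes 1000-1500 widens to
    # segments 0-2047, so start_overrun=1000 and end_overrun=547. The first
    # decoded segment is cut down to its last 24 bytes and the second
    # (final) segment loses its last 547 bytes, leaving exactly bytes
    # 1000-1500 for the client.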
def _decode_segments_from_fragments(self, fragment_iters):
# Decodes the fragments from the object servers and yields one
# segment at a time.
queues = [Queue(1) for _junk in range(len(fragment_iters))]
def put_fragments_in_queue(frag_iter, queue):
try:
for fragment in frag_iter:
if fragment[0] == ' ':
raise Exception('Leading whitespace on fragment.')
queue.put(fragment)
except GreenletExit:
# killed by contextpool
pass
except ChunkReadTimeout:
# unable to resume in GetOrHeadHandler
self.logger.exception("Timeout fetching fragments for %r" %
self.path)
except: # noqa
self.logger.exception("Exception fetching fragments for %r" %
self.path)
finally:
queue.resize(2) # ensure there's room
queue.put(None)
with ContextPool(len(fragment_iters)) as pool:
for frag_iter, queue in zip(fragment_iters, queues):
pool.spawn(put_fragments_in_queue, frag_iter, queue)
while True:
fragments = []
for queue in queues:
fragment = queue.get()
queue.task_done()
fragments.append(fragment)
                # If any object server connection yields out a None, we're
                # done. Either they are all None, and we've finished
# successfully; or some un-recoverable failure has left us
# with an un-reconstructible list of fragments - so we'll
# break out of the iter so WSGI can tear down the broken
# connection.
if not all(fragments):
break
try:
segment = self.policy.pyeclib_driver.decode(fragments)
except ECDriverError:
self.logger.exception("Error decoding fragments for %r" %
self.path)
raise
yield segment
def app_iter_range(self, start, end):
return self
def app_iter_ranges(self, ranges, content_type, boundary, content_size):
return self
def client_range_to_segment_range(client_start, client_end, segment_size):
"""
Takes a byterange from the client and converts it into a byterange
spanning the necessary segments.
Handles prefix, suffix, and fully-specified byte ranges.
Examples:
client_range_to_segment_range(100, 700, 512) = (0, 1023)
client_range_to_segment_range(100, 700, 256) = (0, 767)
client_range_to_segment_range(300, None, 256) = (256, None)
:param client_start: first byte of the range requested by the client
:param client_end: last byte of the range requested by the client
:param segment_size: size of an EC segment, in bytes
:returns: a 2-tuple (seg_start, seg_end) where
* seg_start is the first byte of the first segment, or None if this is
a suffix byte range
* seg_end is the last byte of the last segment, or None if this is a
prefix byte range
"""
# the index of the first byte of the first segment
segment_start = (
int(client_start // segment_size)
* segment_size) if client_start is not None else None
# the index of the last byte of the last segment
segment_end = (
# bytes M-
None if client_end is None else
# bytes M-N
(((int(client_end // segment_size) + 1)
* segment_size) - 1) if client_start is not None else
# bytes -N: we get some extra bytes to make sure we
# have all we need.
#
# To see why, imagine a 100-byte segment size, a
# 340-byte object, and a request for the last 50
# bytes. Naively requesting the last 100 bytes would
# result in a truncated first segment and hence a
# truncated download. (Of course, the actual
# obj-server requests are for fragments, not
# segments, but that doesn't change the
# calculation.)
#
# This does mean that we fetch an extra segment if
# the object size is an exact multiple of the
# segment size. It's a little wasteful, but it's
# better to be a little wasteful than to get some
# range requests completely wrong.
(int(math.ceil((
float(client_end) / segment_size) + 1)) # nsegs
* segment_size))
return (segment_start, segment_end)
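# Worked example of the suffix-range math above (illustrative sizes): for a
# 340-byte object stored with a 100-byte segment size, a request for the
# last 50 bytes gives
#     client_range_to_segment_range(None, 50, 100) == (None, 200)
# so the proxy asks for the last 200 bytes rather than the last 100; the
# 200-299 segment then arrives complete and can be decoded, and the extra
# leading bytes are trimmed later.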
def segment_range_to_fragment_range(segment_start, segment_end, segment_size,
fragment_size):
"""
Takes a byterange spanning some segments and converts that into a
byterange spanning the corresponding fragments within their fragment
archives.
Handles prefix, suffix, and fully-specified byte ranges.
:param segment_start: first byte of the first segment
:param segment_end: last byte of the last segment
:param segment_size: size of an EC segment, in bytes
:param fragment_size: size of an EC fragment, in bytes
:returns: a 2-tuple (frag_start, frag_end) where
* frag_start is the first byte of the first fragment, or None if this
is a suffix byte range
* frag_end is the last byte of the last fragment, or None if this is a
prefix byte range
"""
# Note: segment_start and (segment_end + 1) are
# multiples of segment_size, so we don't have to worry
# about integer math giving us rounding troubles.
#
# There's a whole bunch of +1 and -1 in here; that's because HTTP wants
# byteranges to be inclusive of the start and end, so e.g. bytes 200-300
# is a range containing 101 bytes. Python has half-inclusive ranges, of
# course, so we have to convert back and forth. We try to keep things in
# HTTP-style byteranges for consistency.
# the index of the first byte of the first fragment
fragment_start = ((
segment_start / segment_size * fragment_size)
if segment_start is not None else None)
# the index of the last byte of the last fragment
fragment_end = (
# range unbounded on the right
None if segment_end is None else
# range unbounded on the left; no -1 since we're
# asking for the last N bytes, not to have a
# particular byte be the last one
((segment_end + 1) / segment_size
* fragment_size) if segment_start is None else
# range bounded on both sides; the -1 is because the
# rest of the expression computes the length of the
# fragment, and a range of N bytes starts at index M
# and ends at M + N - 1.
((segment_end + 1) / segment_size * fragment_size) - 1)
return (fragment_start, fragment_end)
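# Worked example (illustrative sizes): with a 1024-byte segment size and a
# 256-byte fragment size,
#     segment_range_to_fragment_range(0, 2047, 1024, 256) == (0, 511)
# two whole segments map to two whole fragments in each archive, and the
# trailing -1 converts the fragment length back into an inclusive HTTP-style
# last-byte index.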
NO_DATA_SENT = 1
SENDING_DATA = 2
DATA_SENT = 3
DATA_ACKED = 4
COMMIT_SENT = 5
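# The constants above track the sender-side state machine used by ECPutter:
# NO_DATA_SENT becomes SENDING_DATA while object chunks are being queued,
# DATA_SENT once end_of_object_data() has emitted the metadata footer,
# DATA_ACKED when enough 2xx responses have been received, and COMMIT_SENT
# after send_commit_confirmation() finishes the second phase of the PUT.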
class ECPutter(object):
"""
This is here mostly to wrap up the fact that all EC PUTs are
chunked because of the mime boundary footer trick and the first
half of the two-phase PUT conversation handling.
An HTTP PUT request that supports streaming.
Probably deserves more docs than this, but meh.
"""
def __init__(self, conn, node, resp, path, connect_duration,
mime_boundary):
        # Note: you probably want to call ECPutter.connect() instead of
        # instantiating one of these directly.
self.conn = conn
self.node = node
self.resp = resp
self.path = path
self.connect_duration = connect_duration
# for handoff nodes node_index is None
self.node_index = node.get('index')
self.mime_boundary = mime_boundary
self.chunk_hasher = md5()
self.failed = False
self.queue = None
self.state = NO_DATA_SENT
def current_status(self):
"""
Returns the current status of the response.
A response starts off with no current status, then may or may not have
a status of 100 for some time, and then ultimately has a final status
like 200, 404, et cetera.
"""
return self.resp.status
def await_response(self, timeout, informational=False):
"""
Get 100-continue response indicating the end of 1st phase of a 2-phase
commit or the final response, i.e. the one with status >= 200.
Might or might not actually wait for anything. If we said Expect:
100-continue but got back a non-100 response, that'll be the thing
returned, and we won't do any network IO to get it. OTOH, if we got
a 100 Continue response and sent up the PUT request's body, then
we'll actually read the 2xx-5xx response off the network here.
:returns: HTTPResponse
:raises: Timeout if the response took too long
"""
conn = self.conn
with Timeout(timeout):
if not conn.resp:
if informational:
self.resp = conn.getexpect()
else:
self.resp = conn.getresponse()
return self.resp
def spawn_sender_greenthread(self, pool, queue_depth, write_timeout,
exception_handler):
"""Call before sending the first chunk of request body"""
self.queue = Queue(queue_depth)
pool.spawn(self._send_file, write_timeout, exception_handler)
def wait(self):
if self.queue.unfinished_tasks:
self.queue.join()
def _start_mime_doc_object_body(self):
self.queue.put("--%s\r\nX-Document: object body\r\n\r\n" %
(self.mime_boundary,))
def send_chunk(self, chunk):
if not chunk:
# If we're not using chunked transfer-encoding, sending a 0-byte
# chunk is just wasteful. If we *are* using chunked
# transfer-encoding, sending a 0-byte chunk terminates the
# request body. Neither one of these is good.
return
elif self.state == DATA_SENT:
raise ValueError("called send_chunk after end_of_object_data")
if self.state == NO_DATA_SENT and self.mime_boundary:
# We're sending the object plus other stuff in the same request
# body, all wrapped up in multipart MIME, so we'd better start
# off the MIME document before sending any object data.
self._start_mime_doc_object_body()
self.state = SENDING_DATA
self.queue.put(chunk)
def end_of_object_data(self, footer_metadata):
"""
Call when there is no more data to send.
:param footer_metadata: dictionary of metadata items
"""
if self.state == DATA_SENT:
raise ValueError("called end_of_object_data twice")
elif self.state == NO_DATA_SENT and self.mime_boundary:
self._start_mime_doc_object_body()
footer_body = json.dumps(footer_metadata)
footer_md5 = md5(footer_body).hexdigest()
tail_boundary = ("--%s" % (self.mime_boundary,))
message_parts = [
("\r\n--%s\r\n" % self.mime_boundary),
"X-Document: object metadata\r\n",
"Content-MD5: %s\r\n" % footer_md5,
"\r\n",
footer_body, "\r\n",
tail_boundary, "\r\n",
]
self.queue.put("".join(message_parts))
self.queue.put('')
self.state = DATA_SENT
def send_commit_confirmation(self):
"""
Call when there are > quorum 2XX responses received. Send commit
confirmations to all object nodes to finalize the PUT.
"""
if self.state == COMMIT_SENT:
raise ValueError("called send_commit_confirmation twice")
self.state = DATA_ACKED
if self.mime_boundary:
body = "put_commit_confirmation"
tail_boundary = ("--%s--" % (self.mime_boundary,))
message_parts = [
"X-Document: put commit\r\n",
"\r\n",
body, "\r\n",
tail_boundary,
]
self.queue.put("".join(message_parts))
self.queue.put('')
self.state = COMMIT_SENT
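    # For orientation, the MIME document assembled by send_chunk(),
    # end_of_object_data() and send_commit_confirmation() looks roughly like
    # this on the wire (boundary shortened for readability):
    #
    #   --boundary
    #   X-Document: object body
    #
    #   <object fragment bytes...>
    #   --boundary
    #   X-Document: object metadata
    #   Content-MD5: <md5 of the JSON footer>
    #
    #   {"X-Object-Sysmeta-Ec-Etag": "...", ...}
    #   --boundary
    #   X-Document: put commit
    #
    #   put_commit_confirmation
    #   --boundary--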
def _send_file(self, write_timeout, exception_handler):
"""
Method for a file PUT coro. Takes chunks from a queue and sends them
down a socket.
If something goes wrong, the "failed" attribute will be set to true
and the exception handler will be called.
"""
while True:
chunk = self.queue.get()
if not self.failed:
to_send = "%x\r\n%s\r\n" % (len(chunk), chunk)
try:
with ChunkWriteTimeout(write_timeout):
self.conn.send(to_send)
except (Exception, ChunkWriteTimeout):
self.failed = True
exception_handler(self.conn.node, _('Object'),
_('Trying to write to %s') % self.path)
self.queue.task_done()
@classmethod
def connect(cls, node, part, path, headers, conn_timeout, node_timeout,
chunked=False):
"""
Connect to a backend node and send the headers.
:returns: Putter instance
:raises: ConnectionTimeout if initial connection timed out
:raises: ResponseTimeout if header retrieval timed out
:raises: InsufficientStorage on 507 response from node
:raises: PutterConnectError on non-507 server error response from node
:raises: FooterNotSupported if need_metadata_footer is set but
backend node can't process footers
:raises: MultiphasePUTNotSupported if need_multiphase_support is
set but backend node can't handle multiphase PUT
"""
mime_boundary = "%.64x" % random.randint(0, 16 ** 64)
headers = HeaderKeyDict(headers)
# We're going to be adding some unknown amount of data to the
# request, so we can't use an explicit content length, and thus
# we must use chunked encoding.
headers['Transfer-Encoding'] = 'chunked'
headers['Expect'] = '100-continue'
if 'Content-Length' in headers:
headers['X-Backend-Obj-Content-Length'] = \
headers.pop('Content-Length')
headers['X-Backend-Obj-Multipart-Mime-Boundary'] = mime_boundary
headers['X-Backend-Obj-Metadata-Footer'] = 'yes'
headers['X-Backend-Obj-Multiphase-Commit'] = 'yes'
start_time = time.time()
with ConnectionTimeout(conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'],
part, 'PUT', path, headers)
connect_duration = time.time() - start_time
with ResponseTimeout(node_timeout):
resp = conn.getexpect()
if resp.status == HTTP_INSUFFICIENT_STORAGE:
raise InsufficientStorage
if is_server_error(resp.status):
raise PutterConnectError(resp.status)
if is_informational(resp.status):
continue_headers = HeaderKeyDict(resp.getheaders())
can_send_metadata_footer = config_true_value(
continue_headers.get('X-Obj-Metadata-Footer', 'no'))
can_handle_multiphase_put = config_true_value(
continue_headers.get('X-Obj-Multiphase-Commit', 'no'))
if not can_send_metadata_footer:
raise FooterNotSupported()
if not can_handle_multiphase_put:
raise MultiphasePUTNotSupported()
conn.node = node
conn.resp = None
if is_success(resp.status) or resp.status == HTTP_CONFLICT:
conn.resp = resp
elif (headers.get('If-None-Match', None) is not None and
resp.status == HTTP_PRECONDITION_FAILED):
conn.resp = resp
return cls(conn, node, resp, path, connect_duration, mime_boundary)
def chunk_transformer(policy, nstreams):
segment_size = policy.ec_segment_size
buf = collections.deque()
total_buf_len = 0
chunk = yield
while chunk:
buf.append(chunk)
total_buf_len += len(chunk)
if total_buf_len >= segment_size:
chunks_to_encode = []
# extract as many chunks as we can from the input buffer
while total_buf_len >= segment_size:
to_take = segment_size
pieces = []
while to_take > 0:
piece = buf.popleft()
if len(piece) > to_take:
buf.appendleft(piece[to_take:])
piece = piece[:to_take]
pieces.append(piece)
to_take -= len(piece)
total_buf_len -= len(piece)
chunks_to_encode.append(''.join(pieces))
frags_by_byte_order = []
for chunk_to_encode in chunks_to_encode:
frags_by_byte_order.append(
policy.pyeclib_driver.encode(chunk_to_encode))
# Sequential calls to encode() have given us a list that
# looks like this:
#
# [[frag_A1, frag_B1, frag_C1, ...],
# [frag_A2, frag_B2, frag_C2, ...], ...]
#
# What we need is a list like this:
#
# [(frag_A1 + frag_A2 + ...), # destined for node A
# (frag_B1 + frag_B2 + ...), # destined for node B
# (frag_C1 + frag_C2 + ...), # destined for node C
# ...]
obj_data = [''.join(frags)
for frags in zip(*frags_by_byte_order)]
chunk = yield obj_data
else:
# didn't have enough data to encode
chunk = yield None
# Now we've gotten an empty chunk, which indicates end-of-input.
# Take any leftover bytes and encode them.
last_bytes = ''.join(buf)
if last_bytes:
last_frags = policy.pyeclib_driver.encode(last_bytes)
yield last_frags
else:
yield [''] * nstreams
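# Minimal usage sketch for the coroutine above (assumed driver code, not a
# copy of the actual caller): the sender primes the generator, feeds it
# client chunks, and gets back either None (not enough data buffered yet) or
# a list with one fragment string per node once a full ec_segment_size of
# bytes is available; sending '' flushes whatever remains.
#
#     transformer = chunk_transformer(policy, nstreams)
#     next(transformer)                       # prime the coroutine
#     frags = transformer.send(client_chunk)  # None or per-node fragments
#     last_frags = transformer.send('')       # encode the buffered tail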
def trailing_metadata(policy, client_obj_hasher,
bytes_transferred_from_client,
fragment_archive_index):
return {
# etag and size values are being added twice here.
# The container override header is used to update the container db
# with these values as they represent the correct etag and size for
# the whole object and not just the FA.
# The object sysmeta headers will be saved on each FA of the object.
'X-Object-Sysmeta-EC-Etag': client_obj_hasher.hexdigest(),
'X-Object-Sysmeta-EC-Content-Length':
str(bytes_transferred_from_client),
'X-Backend-Container-Update-Override-Etag':
client_obj_hasher.hexdigest(),
'X-Backend-Container-Update-Override-Size':
str(bytes_transferred_from_client),
'X-Object-Sysmeta-Ec-Frag-Index': str(fragment_archive_index),
# These fields are for debuggability,
# AKA "what is this thing?"
'X-Object-Sysmeta-EC-Scheme': policy.ec_scheme_description,
'X-Object-Sysmeta-EC-Segment-Size': str(policy.ec_segment_size),
}
@ObjectControllerRouter.register(EC_POLICY)
class ECObjectController(BaseObjectController):
def _fragment_GET_request(self, req, node_iter, partition, policy):
"""
Makes a GET request for a fragment.
"""
backend_headers = self.generate_request_headers(
req, additional=req.headers)
getter = ResumingGetter(self.app, req, 'Object', node_iter,
partition, req.swift_entity_path,
backend_headers,
client_chunk_size=policy.fragment_size,
newest=False)
return (getter, getter.response_parts_iter(req))
def _convert_range(self, req, policy):
"""
Take the requested range(s) from the client and convert it to range(s)
to be sent to the object servers.
This includes widening requested ranges to full segments, then
converting those ranges to fragments so that we retrieve the minimum
number of fragments from the object server.
Mutates the request passed in.
Returns a list of range specs (dictionaries with the different byte
indices in them).
"""
# Since segments and fragments have different sizes, we need
# to modify the Range header sent to the object servers to
# make sure we get the right fragments out of the fragment
# archives.
segment_size = policy.ec_segment_size
fragment_size = policy.fragment_size
range_specs = []
new_ranges = []
for client_start, client_end in req.range.ranges:
# TODO: coalesce ranges that overlap segments. For
# example, "bytes=0-10,20-30,40-50" with a 64 KiB
            # segment size will result in a Range header in the
# object request of "bytes=0-65535,0-65535,0-65535",
# which is wasteful. We should be smarter and only
# request that first segment once.
segment_start, segment_end = client_range_to_segment_range(
client_start, client_end, segment_size)
fragment_start, fragment_end = \
segment_range_to_fragment_range(
segment_start, segment_end,
segment_size, fragment_size)
new_ranges.append((fragment_start, fragment_end))
range_specs.append({'req_client_start': client_start,
'req_client_end': client_end,
'req_segment_start': segment_start,
'req_segment_end': segment_end,
'req_fragment_start': fragment_start,
'req_fragment_end': fragment_end})
req.range = "bytes=" + ",".join(
"%s-%s" % (s if s is not None else "",
e if e is not None else "")
for s, e in new_ranges)
return range_specs
def _get_or_head_response(self, req, node_iter, partition, policy):
req.headers.setdefault("X-Backend-Etag-Is-At",
"X-Object-Sysmeta-Ec-Etag")
if req.method == 'HEAD':
# no fancy EC decoding here, just one plain old HEAD request to
# one object server because all fragments hold all metadata
# information about the object.
resp = self.GETorHEAD_base(
req, _('Object'), node_iter, partition,
req.swift_entity_path)
else: # GET request
orig_range = None
range_specs = []
if req.range:
orig_range = req.range
range_specs = self._convert_range(req, policy)
safe_iter = GreenthreadSafeIterator(node_iter)
with ContextPool(policy.ec_ndata) as pool:
pile = GreenAsyncPile(pool)
for _junk in range(policy.ec_ndata):
pile.spawn(self._fragment_GET_request,
req, safe_iter, partition,
policy)
bad_gets = []
etag_buckets = collections.defaultdict(list)
best_etag = None
for get, parts_iter in pile:
if is_success(get.last_status):
etag = HeaderKeyDict(
get.last_headers)['X-Object-Sysmeta-Ec-Etag']
etag_buckets[etag].append((get, parts_iter))
if etag != best_etag and (
len(etag_buckets[etag]) >
len(etag_buckets[best_etag])):
best_etag = etag
else:
bad_gets.append((get, parts_iter))
matching_response_count = max(
len(etag_buckets[best_etag]), len(bad_gets))
if (policy.ec_ndata - matching_response_count >
pile._pending) and node_iter.nodes_left > 0:
                        # we need more matching responses to reach ec_ndata
                        # than we have pending gets; as long as we still have
                        # nodes in node_iter we can spawn another
pile.spawn(self._fragment_GET_request, req,
safe_iter, partition, policy)
req.range = orig_range
if len(etag_buckets[best_etag]) >= policy.ec_ndata:
# headers can come from any of the getters
resp_headers = HeaderKeyDict(
etag_buckets[best_etag][0][0].source_headers[-1])
resp_headers.pop('Content-Range', None)
eccl = resp_headers.get('X-Object-Sysmeta-Ec-Content-Length')
obj_length = int(eccl) if eccl is not None else None
# This is only true if we didn't get a 206 response, but
# that's the only time this is used anyway.
fa_length = int(resp_headers['Content-Length'])
app_iter = ECAppIter(
req.swift_entity_path,
policy,
[iterator for getter, iterator in etag_buckets[best_etag]],
range_specs, fa_length, obj_length,
self.app.logger)
resp = Response(
request=req,
headers=resp_headers,
conditional_response=True,
app_iter=app_iter)
app_iter.kickoff(req, resp)
else:
statuses = []
reasons = []
bodies = []
headers = []
for getter, body_parts_iter in bad_gets:
statuses.extend(getter.statuses)
reasons.extend(getter.reasons)
bodies.extend(getter.bodies)
headers.extend(getter.source_headers)
resp = self.best_response(
req, statuses, reasons, bodies, 'Object',
headers=headers)
self._fix_response(resp)
return resp
def _fix_response(self, resp):
# EC fragment archives each have different bytes, hence different
# etags. However, they all have the original object's etag stored in
# sysmeta, so we copy that here so the client gets it.
if is_success(resp.status_int):
resp.headers['Etag'] = resp.headers.get(
'X-Object-Sysmeta-Ec-Etag')
resp.headers['Content-Length'] = resp.headers.get(
'X-Object-Sysmeta-Ec-Content-Length')
resp.fix_conditional_response()
def _connect_put_node(self, node_iter, part, path, headers,
logger_thread_locals):
"""
        Make a connection for an erasure-coded object.
Connects to the first working node that it finds in node_iter and sends
over the request headers. Returns a Putter to handle the rest of the
streaming, or None if no working nodes were found.
"""
# the object server will get different bytes, so these
# values do not apply (Content-Length might, in general, but
# in the specific case of replication vs. EC, it doesn't).
headers.pop('Content-Length', None)
headers.pop('Etag', None)
self.app.logger.thread_locals = logger_thread_locals
for node in node_iter:
try:
putter = ECPutter.connect(
node, part, path, headers,
conn_timeout=self.app.conn_timeout,
node_timeout=self.app.node_timeout)
self.app.set_node_timing(node, putter.connect_duration)
return putter
except InsufficientStorage:
self.app.error_limit(node, _('ERROR Insufficient Storage'))
except PutterConnectError as e:
self.app.error_occurred(
node, _('ERROR %(status)d Expect: 100-continue '
'From Object Server') % {
'status': e.status})
except (Exception, Timeout):
self.app.exception_occurred(
node, _('Object'),
_('Expect: 100-continue on %s') % path)
def _determine_chunk_destinations(self, putters):
"""
Given a list of putters, return a dict where the key is the putter
and the value is the node index to use.
This is done so that we line up handoffs using the same node index
(in the primary part list) as the primary that the handoff is standing
in for. This lets erasure-code fragment archives wind up on the
preferred local primary nodes when possible.
"""
# Give each putter a "chunk index": the index of the
# transformed chunk that we'll send to it.
#
# For primary nodes, that's just its index (primary 0 gets
# chunk 0, primary 1 gets chunk 1, and so on). For handoffs,
# we assign the chunk index of a missing primary.
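        # Illustrative example (node layout assumed): if primaries 0, 1 and 3
        # are connected but primary 2 returned 507, the single handoff putter
        # in the list gets chunk index 2, the hole left by the missing primary.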
handoff_conns = []
chunk_index = {}
for p in putters:
if p.node_index is not None:
chunk_index[p] = p.node_index
else:
handoff_conns.append(p)
# Note: we may have more holes than handoffs. This is okay; it
# just means that we failed to connect to one or more storage
# nodes. Holes occur when a storage node is down, in which
# case the connection is not replaced, and when a storage node
# returns 507, in which case a handoff is used to replace it.
holes = [x for x in range(len(putters))
if x not in chunk_index.values()]
for hole, p in zip(holes, handoff_conns):
chunk_index[p] = hole
return chunk_index
def _transfer_data(self, req, policy, data_source, putters, nodes,
min_conns, etag_hasher):
"""
Transfer data for an erasure coded object.
This method was added in the PUT method extraction change
"""
bytes_transferred = 0
chunk_transform = chunk_transformer(policy, len(nodes))
chunk_transform.send(None)
def send_chunk(chunk):
if etag_hasher:
etag_hasher.update(chunk)
backend_chunks = chunk_transform.send(chunk)
if backend_chunks is None:
                # If there aren't enough bytes buffered for erasure-encoding
                # or whatever we're doing, the transform will give us None.
return
for putter in list(putters):
backend_chunk = backend_chunks[chunk_index[putter]]
if not putter.failed:
putter.chunk_hasher.update(backend_chunk)
putter.send_chunk(backend_chunk)
else:
putters.remove(putter)
self._check_min_conn(
req, putters, min_conns, msg='Object PUT exceptions during'
' send, %(conns)s/%(nodes)s required connections')
try:
with ContextPool(len(putters)) as pool:
# build our chunk index dict to place handoffs in the
# same part nodes index as the primaries they are covering
chunk_index = self._determine_chunk_destinations(putters)
for putter in putters:
putter.spawn_sender_greenthread(
pool, self.app.put_queue_depth, self.app.node_timeout,
self.app.exception_occurred)
while True:
with ChunkReadTimeout(self.app.client_timeout):
try:
chunk = next(data_source)
except StopIteration:
break
bytes_transferred += len(chunk)
if bytes_transferred > constraints.MAX_FILE_SIZE:
raise HTTPRequestEntityTooLarge(request=req)
send_chunk(chunk)
if req.content_length and (
bytes_transferred < req.content_length):
req.client_disconnect = True
self.app.logger.warn(
_('Client disconnected without sending enough data'))
self.app.logger.increment('client_disconnects')
raise HTTPClientDisconnect(request=req)
computed_etag = (etag_hasher.hexdigest()
if etag_hasher else None)
received_etag = req.headers.get(
'etag', '').strip('"')
if (computed_etag and received_etag and
computed_etag != received_etag):
raise HTTPUnprocessableEntity(request=req)
send_chunk('') # flush out any buffered data
for putter in putters:
trail_md = trailing_metadata(
policy, etag_hasher,
bytes_transferred,
chunk_index[putter])
trail_md['Etag'] = \
putter.chunk_hasher.hexdigest()
putter.end_of_object_data(trail_md)
for putter in putters:
putter.wait()
# for storage policies requiring 2-phase commit (e.g.
# erasure coding), enforce >= 'quorum' number of
# 100-continue responses - this indicates successful
# object data and metadata commit and is a necessary
# condition to be met before starting 2nd PUT phase
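                # Flow sketch of the two phases as implemented here: phase 1
                # (above) streamed the encoded data and trailing metadata and
                # now waits for 100-continue from a quorum of putters; phase 2
                # (below) sends the commit confirmation that lets each object
                # server mark its fragment archive durable.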
final_phase = False
need_quorum = True
statuses, reasons, bodies, _junk, quorum = \
self._get_put_responses(
req, putters, len(nodes), final_phase,
min_conns, need_quorum=need_quorum)
if not quorum:
self.app.logger.error(
_('Not enough object servers ack\'ed (got %d)'),
statuses.count(HTTP_CONTINUE))
raise HTTPServiceUnavailable(request=req)
elif not self._have_adequate_informational(
statuses, min_conns):
resp = self.best_response(req, statuses, reasons, bodies,
_('Object PUT'),
quorum_size=min_conns)
if is_client_error(resp.status_int):
# if 4xx occurred in this state it is absolutely
# a bad conversation between proxy-server and
# object-server (even if it's
# HTTP_UNPROCESSABLE_ENTITY) so we should regard this
# as HTTPServiceUnavailable.
raise HTTPServiceUnavailable(request=req)
else:
# Other errors should use raw best_response
raise resp
# quorum achieved, start 2nd phase - send commit
# confirmation to participating object servers
# so they write a .durable state file indicating
# a successful PUT
for putter in putters:
putter.send_commit_confirmation()
for putter in putters:
putter.wait()
except ChunkReadTimeout as err:
self.app.logger.warn(
_('ERROR Client read timeout (%ss)'), err.seconds)
self.app.logger.increment('client_timeouts')
raise HTTPRequestTimeout(request=req)
except ChunkReadError:
req.client_disconnect = True
self.app.logger.warn(
_('Client disconnected without sending last chunk'))
self.app.logger.increment('client_disconnects')
raise HTTPClientDisconnect(request=req)
except HTTPException:
raise
except (Exception, Timeout):
self.app.logger.exception(
_('ERROR Exception causing client disconnect'))
raise HTTPClientDisconnect(request=req)
def _have_adequate_responses(
self, statuses, min_responses, conditional_func):
"""
        Given a list of statuses from several requests, determine if a
        satisfactory number of nodes have responded with 1xx or 2xx statuses,
        enough to deem the transaction successful from the client's perspective.
:param statuses: list of statuses returned so far
:param min_responses: minimal pass criterion for number of successes
:param conditional_func: a callable function to check http status code
:returns: True or False, depending on current number of successes
"""
if sum(1 for s in statuses if (conditional_func(s))) >= min_responses:
return True
return False
def _have_adequate_successes(self, statuses, min_responses):
"""
        Partial application of _have_adequate_responses that checks for 2xx statuses.
"""
return self._have_adequate_responses(
statuses, min_responses, is_success)
def _have_adequate_informational(self, statuses, min_responses):
"""
        Partial application of _have_adequate_responses that checks for 1xx statuses.
"""
return self._have_adequate_responses(
statuses, min_responses, is_informational)
def _await_response(self, conn, final_phase):
return conn.await_response(
self.app.node_timeout, not final_phase)
def _get_conn_response(self, conn, req, final_phase, **kwargs):
try:
resp = self._await_response(conn, final_phase=final_phase,
**kwargs)
except (Exception, Timeout):
resp = None
if final_phase:
status_type = 'final'
else:
status_type = 'commit'
self.app.exception_occurred(
conn.node, _('Object'),
_('Trying to get %s status of PUT to %s') % (
status_type, req.path))
return (conn, resp)
def _get_put_responses(self, req, putters, num_nodes, final_phase,
min_responses, need_quorum=True):
"""
Collect erasure coded object responses.
        Collect object responses to a PUT request and determine if a
        satisfactory number of nodes have returned success. Return statuses,
        the quorum result if indicated by 'need_quorum', and etags if this is
        the final phase of a multiphase PUT transaction.
:param req: the request
:param putters: list of putters for the request
:param num_nodes: number of nodes involved
:param final_phase: boolean indicating if this is the last phase
:param min_responses: minimum needed when not requiring quorum
:param need_quorum: boolean indicating if quorum is required
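        :returns: a tuple of (statuses, reasons, bodies, etags, quorum)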
"""
statuses = []
reasons = []
bodies = []
etags = set()
pile = GreenAsyncPile(len(putters))
for putter in putters:
if putter.failed:
continue
pile.spawn(self._get_conn_response, putter, req,
final_phase=final_phase)
def _handle_response(putter, response):
statuses.append(response.status)
reasons.append(response.reason)
if final_phase:
body = response.read()
else:
body = ''
bodies.append(body)
if response.status == HTTP_INSUFFICIENT_STORAGE:
putter.failed = True
self.app.error_limit(putter.node,
_('ERROR Insufficient Storage'))
elif response.status >= HTTP_INTERNAL_SERVER_ERROR:
putter.failed = True
self.app.error_occurred(
putter.node,
_('ERROR %(status)d %(body)s From Object Server '
're: %(path)s') %
{'status': response.status,
'body': body[:1024], 'path': req.path})
elif is_success(response.status):
etags.add(response.getheader('etag').strip('"'))
quorum = False
for (putter, response) in pile:
if response:
_handle_response(putter, response)
if self._have_adequate_successes(statuses, min_responses):
break
else:
putter.failed = True
# give any pending requests *some* chance to finish
finished_quickly = pile.waitall(self.app.post_quorum_timeout)
for (putter, response) in finished_quickly:
if response:
_handle_response(putter, response)
if need_quorum:
if final_phase:
while len(statuses) < num_nodes:
statuses.append(HTTP_SERVICE_UNAVAILABLE)
reasons.append('')
bodies.append('')
else:
                # intermediate response phase - set the return value to True
                # only if enough responses agree on the same status value, for
                # any status except 5xx
if self.have_quorum(statuses, num_nodes, quorum=min_responses):
quorum = True
return statuses, reasons, bodies, etags, quorum
def _store_object(self, req, data_source, nodes, partition,
outgoing_headers):
"""
Store an erasure coded object.
"""
policy_index = int(req.headers.get('X-Backend-Storage-Policy-Index'))
policy = POLICIES.get_by_index(policy_index)
# Since the request body sent from client -> proxy is not
# the same as the request body sent proxy -> object, we
# can't rely on the object-server to do the etag checking -
# so we have to do it here.
etag_hasher = md5()
min_conns = policy.quorum
putters = self._get_put_connections(
req, nodes, partition, outgoing_headers,
policy, expect=True)
try:
# check that a minimum number of connections were established and
# meet all the correct conditions set in the request
self._check_failure_put_connections(putters, req, nodes, min_conns)
self._transfer_data(req, policy, data_source, putters,
nodes, min_conns, etag_hasher)
final_phase = True
need_quorum = False
# The .durable file will propagate in a replicated fashion; if
# one exists, the reconstructor will spread it around.
# In order to avoid successfully writing an object, but refusing
            # to serve it on a subsequent GET because we don't have enough
# durable data fragments - we require the same number of durable
# writes as quorum fragment writes. If object servers are in the
# future able to serve their non-durable fragment archives we may
# be able to reduce this quorum count if needed.
min_conns = policy.quorum
putters = [p for p in putters if not p.failed]
# ignore response etags, and quorum boolean
statuses, reasons, bodies, _etags, _quorum = \
self._get_put_responses(req, putters, len(nodes),
final_phase, min_conns,
need_quorum=need_quorum)
except HTTPException as resp:
return resp
etag = etag_hasher.hexdigest()
resp = self.best_response(req, statuses, reasons, bodies,
_('Object PUT'), etag=etag,
quorum_size=min_conns)
resp.last_modified = math.ceil(
float(Timestamp(req.headers['X-Timestamp'])))
return resp
| {
"content_hash": "5d4ef4c532d4d8805c977a940443c8d6",
"timestamp": "",
"source": "github",
"line_count": 2502,
"max_line_length": 144,
"avg_line_length": 43.03357314148681,
"alnum_prop": 0.5667131048574348,
"repo_name": "heemanshu/swift_liberty",
"id": "9f283bbc1f254f84f07097b68f7b188ab8b684c1",
"size": "108906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "swift/proxy/controllers/obj.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6630761"
},
{
"name": "Shell",
"bytes": "950"
}
],
"symlink_target": ""
} |
import random, time
import os, sys, socket, subprocess, re
class GridController:
"""
To initialize an instance of the class:
    test_controller = GridController.GridController(test_cmd_list, ...)
To run commands on the grid using that instance:
test_controller.run_grid_submission()
    To get failed commands upon completion (returns a list of tuples (command, job_id, return_value, shell_script); returns an empty list if no commands failed):
test_controller.get_failed_cmds()
To clean up logs:
test_controller.clean_logs()
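    Illustrative end-to-end example (queue, resources and command strings here
    are assumptions for illustration, not recommended defaults):
        cmds = ['echo job_%d > job_%d.txt' % (i, i) for i in range(10)]
        controller = GridController(cmds, platform='UGER', queue='short', cmds_per_node=5, memory=4, cpus=1)
        controller.run_grid_submission()
        failed = controller.get_failed_cmds()  # empty list when all commands succeeded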
"""
def __init__(self, command_list, platform = 'LSF', queue = 'hour', dotkits = [], cmds_per_node = 50, memory = False, cpus=False, mount_test = False, max_nodes=500, debug = False, project = False):
self.command_list = command_list
self.queue = queue
self.cmds_per_node = cmds_per_node
self.memory = memory
self.cpus = cpus
self.mount_test = mount_test
self.max_nodes = max_nodes
self.nodes_in_progress = {}
self.cmd_index_to_job_id = {}
self.cmd_index_to_shell_script = {}
self.job_id_to_submission_time = {}
self.debug = debug ## for debugging, enables extra messages
self.project = project
self.platform = platform
self.dotkits = dotkits
self.RESORT_TO_POLLING_TIME = 900
self.num_cmds_launched = 0
self.num_nodes_used = 0
self.num_cmds = len(self.command_list)
os.umask(0000)
## make log directory
self.log_id = str(random.randrange(10000000,100000000))
self.log_dir_name = 'bsub.' + self.log_id
if os.path.exists(self.log_dir_name):
log_except_str = 'log_dir ' + self.log_dir_name + ' already exists'
raise Exception(log_except_str)
else:
os.makedirs(self.log_dir_name)
## write commands to log directory
file = open((self.log_dir_name + '/cmds_list.txt'), 'w')
command_index = 0
for command in self.command_list:
file.write('index(' + str(command_index) + ')\t' + command + '\n')
command_index += 1
file.close()
## finish logging setup
self.command_dir = self.log_dir_name + '/cmds'
self.retvals_dir = self.log_dir_name + '/retvals'
self.monitor_dir = self.log_dir_name + '/monitor'
for dir in [self.command_dir, self.retvals_dir, self.monitor_dir]:
os.makedirs(dir)
def get_command_list(self):
return self.command_list
def get_log_dir_name(self):
return self.log_dir_name
def write_pid_file(self):
hostname = socket.gethostname()
file = open((self.log_dir_name + '/' + hostname + '.pid'), 'w')
file.write(str(os.getpid()))
file.close()
def run_grid_submission(self):
self.write_pid_file()
while self.num_cmds_launched < self.num_cmds:
self.num_cmds_launched = self.submit_job()
self.num_nodes_used = self.get_num_nodes_in_use()
sys.stdout.write('\rCMDS: ' + str(self.num_cmds_launched) + '/' + str(self.num_cmds) + ' [' + str(self.num_nodes_used) + '/' + str(self.max_nodes) + ' nodes in use] ')
sys.stdout.flush()
if self.num_nodes_used >= self.max_nodes:
num_nodes_finished = self.wait_for_completions()
self.num_nodes_used = self.num_nodes_used - num_nodes_finished
print('\nAll cmds submitted to grid. Now waiting for them to finish.')
## wait for rest to finish
num_nodes_finished = self.wait_for_completions()
while num_nodes_finished:
num_nodes_finished = self.wait_for_completions()
self.num_nodes_used = self.num_nodes_used - num_nodes_finished
sys.stdout.write('\rCMDS: ' + str(self.num_cmds_launched) + '/' + str(self.num_cmds) + ' [' + str(self.num_nodes_used) + '/' + str(self.max_nodes) + ' nodes in use] ')
sys.stdout.flush()
print('\nAll nodes completed. Now auditing job completion status values.')
self.get_exit_values()
num_successes = 0
num_failures = 0
num_unknown = 0
for retval in self.retvals:
try:
int(retval)
if int(retval) == 0:
num_successes = num_successes + 1
else:
num_failures = num_failures + 1
except:
num_unknown = num_unknown + 1
self.write_result_summary(num_successes, num_failures, num_unknown)
if num_successes == self.num_cmds:
print('All commands completed successfully.')
else:
print('num_success: ' + str(num_successes) + ' num_fail: ' + str(num_failures) + ' num_unknown: ' + str(num_unknown))
print('Finished')
def submit_job(self):
orig_num_cmds_launched = self.num_cmds_launched
shell_script = self.command_dir + '/S' + str(self.log_id) + '.' + str(self.num_cmds_launched) + '.sh'
file = open(shell_script, 'w')
file.write('#!/bin/sh\n\n')
file.write('## add any special environment settings\n\n')
file.write('echo HOST: $HOSTNAME\n')
file.write('echo HOST: $HOSTNAME >&2\n\n')
if self.platform == 'GridEngine' or self.platform == 'UGER' or self.dotkits:
file.write('source /broad/software/scripts/useuse\n')
if self.platform == 'GridEngine':
file.write('reuse GridEngine8\n')
if self.platform == 'UGER':
file.write('reuse UGER\n')
if self.dotkits:
for dotkit in self.dotkits:
file.write('reuse ' + dotkit + '\n')
num_cmds_written = 0
monitor_started = self.monitor_dir + '/' + str(self.num_cmds_launched) + '.started'
monitor_finished = self.monitor_dir + '/' + str(self.num_cmds_launched) + '.finished'
cmd_indices_prepped = []
while (self.num_cmds_launched < self.num_cmds) and (num_cmds_written < self.cmds_per_node): ## Brian's code had && instead of and
next_cmd_index = self.num_cmds_launched
cmd_string = self.command_list[next_cmd_index]
self.cmd_index_to_shell_script[next_cmd_index] = shell_script
cmd_indices_prepped.append(next_cmd_index)
            retval_bin = str(next_cmd_index // 1000)
retval_subdir = self.retvals_dir + '/' + retval_bin
if not os.path.exists(retval_subdir):
os.makedirs(retval_subdir)
file.write('## Command index ' + str(next_cmd_index) + '\n')
file.write('touch ' + monitor_started + '\n')
file.write(cmd_string + '\n')
file.write('echo $? >> ' + retval_subdir + '/entry_' + str(next_cmd_index) + '.ret\n\n')
self.num_cmds_launched += 1
num_cmds_written += 1
file.write('\nrm -f ' + monitor_started + '\n')
file.write('touch ' + monitor_finished + '\n\n')
file.write('exit 0\n\n')
file.close()
        os.chmod(shell_script, 0o775)
if self.debug:
print('Submitting ' + shell_script + ' to grid')
script_basename = os.path.basename(shell_script)
cmd = ''
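        ## Example of the submission string built below for LSF (script name
        ## and queue are illustrative only):
        ##   bsub -q hour -e S123.0.sh.stderr -o S123.0.sh.stdout -W 4:0 S123.0.sh 2>&1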
if self.platform == 'LSF':
cmd = 'bsub -q ' + self.queue + ' -e ' + shell_script + '.stderr -o ' + shell_script + '.stdout'
if self.memory:
cmd = cmd + ' -R \"rusage[mem=' + str(self.memory) + ']\"'
if self.cpus:
cmd = cmd + ' -n ' + str(self.cpus) + ' -R \"span[hosts=1]\"'
if self.queue == 'hour':
cmd = cmd + ' -W 4:0'
if self.project:
cmd = cmd + ' -P ' + self.project
if self.mount_test:
cmd = cmd + ' -E \"/broad/tools/NoArch/pkgs/local/checkmount ' + self.mount_test + ' && [ -e ' + self.mount_test + ' ]\"'
elif self.platform == 'GridEngine' or self.platform == 'UGER':
cmd = 'qsub -V -cwd -q ' + self.queue + ' -e ' + shell_script + '.stderr -o ' + shell_script + '.stdout'
if self.platform == 'GridEngine':
if self.memory:
cmd = cmd + ' -l h_vmem=' + str(self.memory) + 'G'
if self.cpus:
cmd = cmd + ' -pe smp_pe ' + str(self.cpus)
elif self.platform == 'UGER':
if self.memory:
memory_setting = self.memory
if self.cpus:
memory_setting = int(self.memory/self.cpus) + (self.memory%self.cpus > 0)
cmd = cmd + ' -l m_mem_free=' + str(memory_setting) + 'g'
if self.cpus:
cmd = cmd + ' -pe smp ' + str(self.cpus)
if self.project:
cmd = cmd + ' -P ' + self.project
if self.mount_test:
print('Mount test unavailable through GridEngine/UGER')
else:
raise Exception(('Unsupported grid platform: ' + self.platform))
cmd = cmd + ' ' + shell_script + ' 2>&1'
if self.debug:
print(cmd)
#submission_return = subprocess.call(cmd, shell=True)
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
submission_out = process.communicate()[0]
submission_return = process.returncode
if submission_return:
print('Grid failed to accept job: ' + cmd + '\n (ret ' + str(submission_return) + ')\n')
os.unlink(shell_script) # cleanup, try again later
time.sleep(120) # sleep 2 minutes for now. Give the system time to recuperate if a problem exists
return orig_num_cmds_launched
else:
shell_script = os.path.basename(shell_script)
file = open((self.log_dir_name + '/job_ids.txt'), 'a')
job_pattern = re.compile(r'Job \<(\d+)\>')
if self.platform == 'GridEngine' or self.platform == 'UGER':
job_pattern = re.compile(r'Your job (\d+)')
matched = job_pattern.search(submission_out)
if matched:
job_id = matched.group(1)
file.write(job_id + '\t' + shell_script + '\n')
self.nodes_in_progress[monitor_finished] = job_id ## hope this is right
for cmd_index in cmd_indices_prepped:
self.cmd_index_to_job_id[cmd_index] = job_id
if self.debug:
print('job id: ' + str(job_id) + ' cmd index: ' + str(cmd_index))
self.job_id_to_submission_time[job_id] = int(time.time())
## self.job_id_tester = job_id ## for testing only
else:
                raise Exception(('Fatal error, couldn\'t extract Job ID from submission text: ' + submission_out))
file.close()
#time.sleep(15) # wait just a short while to give the system a few seconds to act on the submitted jobs
return(self.num_cmds_launched)
def get_num_nodes_in_use(self):
num_nodes_in_use = len(self.nodes_in_progress.keys())
if self.debug:
print('Num nodes currently in use: ' + str(num_nodes_in_use))
return num_nodes_in_use
def wait_for_completions(self):
if self.debug:
print('Running wait_for_completions()')
seen_finished = 0
done = []
while not seen_finished:
## check to see if there are any jobs remaining:
if self.get_num_nodes_in_use() == 0:
## no jobs in the queue
if self.debug:
print('No nodes in use, exiting wait')
return 0
## check for finished jobs
for monitor_file in self.nodes_in_progress.keys():
if os.path.isfile(monitor_file):
done.append(monitor_file)
seen_finished = 1
else:
job_id = self.nodes_in_progress[monitor_file]
time_launched = self.job_id_to_submission_time[job_id]
current_time = int(time.time())
                    ## see if the polling interval (RESORT_TO_POLLING_TIME) has passed
if (current_time - time_launched) >= self.RESORT_TO_POLLING_TIME:
## poll the system directly:
if not self.is_job_running_or_pending_on_grid(job_id):
done.append(monitor_file)
seen_finished = 1
else:
## reset submission time to delay next polling time
self.job_id_to_submission_time[job_id] = int(time.time())
if seen_finished:
for monitor_file in done:
job_id = self.nodes_in_progress[monitor_file]
if self.debug:
print('job[' + str(job_id) + ']: ' + str(monitor_file) + ' is finished')
del self.nodes_in_progress[monitor_file]
del self.job_id_to_submission_time[job_id]
return len(done)
else:
## wait a while and check again
if self.debug:
print('Waiting for jobs to finish')
time.sleep(15)
def is_job_running_or_pending_on_grid(self, job_id):
## job_id = self.job_id_tester ## for testing only
if (int(time.time()) - self.job_id_to_submission_time[job_id]) < self.RESORT_TO_POLLING_TIME:
return('TOO_SOON')
if self.debug:
print('Polling grid to check status of job: ' + str(job_id))
attempts = 0
while attempts < 5:
cmd = 'bjobs ' + str(job_id)
if self.platform == 'GridEngine' or self.platform == 'UGER':
cmd = 'qstat -s za'
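            # Column layout assumed by the parsing below: 'bjobs' prints
            # JOBID USER STAT ... (state in field index 2), while 'qstat -s za'
            # reports the job state in field index 4.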
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
submission_out = process.communicate()[0]
submission_return = process.returncode
if not submission_return:
if self.debug:
print('Submission out: ' + submission_out)
split_out = submission_out.split('\n')
for split_out_line in split_out:
split_out2 = split_out_line.split()
try:
test_index = split_out2[0]
if split_out2[0] == job_id:
state = split_out2[2]
if self.platform == 'GridEngine' or self.platform == 'UGER':
state = split_out2[4]
if state == 'DONE' or state == 'EXIT' or state == 'UNKWN' or state == 'ZOMBI' or state == 'z' or state == 'Eqw':
return 0
else:
self.job_id_to_submission_time[job_id] = int(time.time())
return state
except:
return 0 ### job missing from qstat output
attempts = attempts + 1
time.sleep(15)
print('No record of job_id ' + str(job_id) + ', setting as state unknown\n')
return 0 ## Brian returns that as unknown, but it results in the same as UNKWN
def get_ret_filename(self, cmd_index):
        retval_bin = str(cmd_index // 1000)
retval_file = self.retvals_dir + '/' + retval_bin + '/entry_' + str(cmd_index) + '.ret'
return retval_file
def clean_logs(self):
cmd = 'rm -rf ' + self.log_dir_name
return_code = subprocess.call(cmd, shell=True)
return return_code
def write_result_summary(self, num_successes, num_failures, num_unknown):
status = 'failure'
if num_failures == 0 and num_unknown == 0:
status = 'success'
file = open((self.log_dir_name + '/bsub.finished.' + status), 'a')
file.write('num_successes: ' + str(num_successes) + '\n')
file.write('num_failures: ' + str(num_failures) + '\n')
file.write('num_unknown: ' + str(num_unknown) + '\n')
file.close()
def get_failed_cmds(self):
failed_cmds = []
for i in range(len(self.retvals)):
if self.retvals[i]:
failed_cmds.append((self.command_list[i],self.cmd_index_to_job_id[i],self.retvals[i],self.cmd_index_to_shell_script[i]))
return failed_cmds
def get_exit_values(self):
self.retvals = []
if self.debug:
print('Processing ' + self.retvals_dir)
for i in range(self.num_cmds):
retval_file = self.get_ret_filename(i)
if self.debug:
print('file: ' + retval_file)
try:
os.path.getsize(retval_file)
file = open(retval_file, 'r')
retval_string = file.read()
retval_string = ''.join(retval_string.split())
if self.debug:
print('retval: ' + retval_string)
self.retvals.append(int(retval_string))
file.close()
except:
self.retvals.append('FILE_NOT_EXISTS')
| {
"content_hash": "cf1c6217dced6f766d0bce5bd18069d7",
"timestamp": "",
"source": "github",
"line_count": 424,
"max_line_length": 197,
"avg_line_length": 34.155660377358494,
"alnum_prop": 0.6349261151774617,
"repo_name": "rhysf/Synima",
"id": "9b5f0f232b17412e6f71ec2475f6dbd8b65c2726",
"size": "14558",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/support_scripts/GridController.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "252871"
},
{
"name": "Python",
"bytes": "17172"
}
],
"symlink_target": ""
} |
from collections import (
Counter,
defaultdict,
)
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
concat,
isna,
timedelta_range,
)
import pandas._testing as tm
from pandas.tests.apply.common import series_transform_kernels
def test_series_map_box_timedelta():
# GH#11349
ser = Series(timedelta_range("1 day 1 s", periods=5, freq="h"))
def f(x):
return x.total_seconds()
ser.map(f)
ser.apply(f)
DataFrame(ser).applymap(f)
def test_apply(datetime_series):
with np.errstate(all="ignore"):
tm.assert_series_equal(datetime_series.apply(np.sqrt), np.sqrt(datetime_series))
# element-wise apply
import math
tm.assert_series_equal(datetime_series.apply(math.exp), np.exp(datetime_series))
# empty series
s = Series(dtype=object, name="foo", index=Index([], name="bar"))
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
# check all metadata (GH 9322)
assert s is not rs
assert s.index is rs.index
assert s.dtype == rs.dtype
assert s.name == rs.name
# index but no data
s = Series(index=[1, 2, 3], dtype=np.float64)
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
def test_apply_same_length_inference_bug():
s = Series([1, 2])
def f(x):
return (x, x + 1)
result = s.apply(f)
expected = s.map(f)
tm.assert_series_equal(result, expected)
s = Series([1, 2, 3])
result = s.apply(f)
expected = s.map(f)
tm.assert_series_equal(result, expected)
def test_apply_dont_convert_dtype():
s = Series(np.random.randn(10))
def f(x):
return x if x > 0 else np.nan
result = s.apply(f, convert_dtype=False)
assert result.dtype == object
def test_apply_args():
s = Series(["foo,bar"])
result = s.apply(str.split, args=(",",))
assert result[0] == ["foo", "bar"]
assert isinstance(result[0], list)
@pytest.mark.parametrize(
"args, kwargs, increment",
[((), {}, 0), ((), {"a": 1}, 1), ((2, 3), {}, 32), ((1,), {"c": 2}, 201)],
)
def test_agg_args(args, kwargs, increment):
# GH 43357
def f(x, a=0, b=0, c=0):
return x + a + 10 * b + 100 * c
s = Series([1, 2])
result = s.agg(f, 0, *args, **kwargs)
expected = s + increment
tm.assert_series_equal(result, expected)
def test_series_map_box_timestamps():
# GH#2689, GH#2627
ser = Series(pd.date_range("1/1/2000", periods=10))
def func(x):
return (x.hour, x.day, x.month)
# it works!
ser.map(func)
ser.apply(func)
def test_series_map_stringdtype(any_string_dtype):
# map test on StringDType, GH#40823
ser1 = Series(
data=["cat", "dog", "rabbit"],
index=["id1", "id2", "id3"],
dtype=any_string_dtype,
)
ser2 = Series(data=["id3", "id2", "id1", "id7000"], dtype=any_string_dtype)
result = ser2.map(ser1)
expected = Series(data=["rabbit", "dog", "cat", pd.NA], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
def test_apply_box():
# ufunc will not be boxed. Same test cases as the test_map_box
vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]
s = Series(vals)
assert s.dtype == "datetime64[ns]"
# boxed value must be Timestamp instance
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = Series(["Timestamp_1_None", "Timestamp_2_None"])
tm.assert_series_equal(res, exp)
vals = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
]
s = Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]
s = Series(vals)
assert s.dtype == "timedelta64[ns]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.days}")
exp = Series(["Timedelta_1", "Timedelta_2"])
tm.assert_series_equal(res, exp)
# period
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = Series(vals)
assert s.dtype == "Period[M]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.freqstr}")
exp = Series(["Period_M", "Period_M"])
tm.assert_series_equal(res, exp)
def test_apply_datetimetz():
values = pd.date_range("2011-01-01", "2011-01-02", freq="H").tz_localize(
"Asia/Tokyo"
)
s = Series(values, name="XX")
result = s.apply(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range("2011-01-02", "2011-01-03", freq="H").tz_localize(
"Asia/Tokyo"
)
exp = Series(exp_values, name="XX")
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.apply(lambda x: x.hour)
exp = Series(list(range(24)) + [0], name="XX", dtype=np.int64)
tm.assert_series_equal(result, exp)
# not vectorized
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = Series(["Asia/Tokyo"] * 25, name="XX")
tm.assert_series_equal(result, exp)
def test_apply_categorical():
values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
ser = Series(values, name="XX", index=list("abcdefg"))
result = ser.apply(lambda x: x.lower())
    # should be categorical dtype when the number of categories is
    # the same
values = pd.Categorical(list("abbabcd"), categories=list("dcba"), ordered=True)
exp = Series(values, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp.values)
result = ser.apply(lambda x: "A")
exp = Series(["A"] * 7, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
assert result.dtype == object
@pytest.mark.parametrize("series", [["1-1", "1-1", np.NaN], ["1-1", "1-2", np.NaN]])
def test_apply_categorical_with_nan_values(series):
# GH 20714 bug fixed in: GH 24275
s = Series(series, dtype="category")
result = s.apply(lambda x: x.split("-")[0])
result = result.astype(object)
expected = Series(["1", "1", np.NaN], dtype="category")
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
def test_apply_empty_integer_series_with_datetime_index():
# GH 21245
s = Series([], index=pd.date_range(start="2018-01-01", periods=0), dtype=int)
result = s.apply(lambda x: x)
tm.assert_series_equal(result, s)
def test_transform(string_series):
# transforming functions
with np.errstate(all="ignore"):
f_sqrt = np.sqrt(string_series)
f_abs = np.abs(string_series)
# ufunc
result = string_series.apply(np.sqrt)
expected = f_sqrt.copy()
tm.assert_series_equal(result, expected)
# list-like
result = string_series.apply([np.sqrt])
expected = f_sqrt.to_frame().copy()
expected.columns = ["sqrt"]
tm.assert_frame_equal(result, expected)
result = string_series.apply(["sqrt"])
tm.assert_frame_equal(result, expected)
# multiple items in list
    # these are in the same order as if we applied each function to the
    # series and then concatenated the results
expected = concat([f_sqrt, f_abs], axis=1)
expected.columns = ["sqrt", "absolute"]
result = string_series.apply([np.sqrt, np.abs])
tm.assert_frame_equal(result, expected)
# dict, provide renaming
expected = concat([f_sqrt, f_abs], axis=1)
expected.columns = ["foo", "bar"]
expected = expected.unstack().rename("series")
result = string_series.apply({"foo": np.sqrt, "bar": np.abs})
tm.assert_series_equal(result.reindex_like(expected), expected)
@pytest.mark.parametrize("op", series_transform_kernels)
def test_transform_partial_failure(op, request):
# GH 35964
if op in ("ffill", "bfill", "pad", "backfill", "shift"):
request.node.add_marker(
pytest.mark.xfail(
raises=AssertionError, reason=f"{op} is successful on any dtype"
)
)
if op in ("rank", "fillna"):
pytest.skip(f"{op} doesn't raise TypeError on object")
# Using object makes most transform kernels fail
ser = Series(3 * [object])
expected = ser.transform(["shift"])
match = rf"\['{op}'\] did not transform successfully"
with tm.assert_produces_warning(FutureWarning, match=match):
result = ser.transform([op, "shift"])
tm.assert_equal(result, expected)
expected = ser.transform({"B": "shift"})
match = r"\['A'\] did not transform successfully"
with tm.assert_produces_warning(FutureWarning, match=match):
result = ser.transform({"A": op, "B": "shift"})
tm.assert_equal(result, expected)
expected = ser.transform({"B": ["shift"]})
match = r"\['A'\] did not transform successfully"
with tm.assert_produces_warning(FutureWarning, match=match):
result = ser.transform({"A": [op], "B": ["shift"]})
tm.assert_equal(result, expected)
match = r"\['B'\] did not transform successfully"
with tm.assert_produces_warning(FutureWarning, match=match):
expected = ser.transform({"A": ["shift"], "B": [op]})
match = rf"\['{op}'\] did not transform successfully"
with tm.assert_produces_warning(FutureWarning, match=match):
result = ser.transform({"A": [op, "shift"], "B": [op]})
tm.assert_equal(result, expected)
def test_transform_partial_failure_valueerror():
# GH 40211
match = ".*did not transform successfully"
def noop(x):
return x
def raising_op(_):
raise ValueError
ser = Series(3 * [object])
expected = ser.transform([noop])
with tm.assert_produces_warning(FutureWarning, match=match):
result = ser.transform([noop, raising_op])
tm.assert_equal(result, expected)
expected = ser.transform({"B": noop})
with tm.assert_produces_warning(FutureWarning, match=match):
result = ser.transform({"A": raising_op, "B": noop})
tm.assert_equal(result, expected)
expected = ser.transform({"B": [noop]})
with tm.assert_produces_warning(FutureWarning, match=match):
result = ser.transform({"A": [raising_op], "B": [noop]})
tm.assert_equal(result, expected)
expected = ser.transform({"A": [noop], "B": [noop]})
with tm.assert_produces_warning(FutureWarning, match=match):
result = ser.transform({"A": [noop, raising_op], "B": [noop]})
tm.assert_equal(result, expected)
def test_demo():
# demonstration tests
s = Series(range(6), dtype="int64", name="series")
result = s.agg(["min", "max"])
expected = Series([0, 5], index=["min", "max"], name="series")
tm.assert_series_equal(result, expected)
result = s.agg({"foo": "min"})
expected = Series([0], index=["foo"], name="series")
tm.assert_series_equal(result, expected)
def test_agg_apply_evaluate_lambdas_the_same(string_series):
# test that we are evaluating row-by-row first
# before vectorized evaluation
result = string_series.apply(lambda x: str(x))
expected = string_series.agg(lambda x: str(x))
tm.assert_series_equal(result, expected)
result = string_series.apply(str)
expected = string_series.agg(str)
tm.assert_series_equal(result, expected)
def test_with_nested_series(datetime_series):
# GH 2316
# .agg with a reducer and a transform, what to do
result = datetime_series.apply(lambda x: Series([x, x ** 2], index=["x", "x^2"]))
expected = DataFrame({"x": datetime_series, "x^2": datetime_series ** 2})
tm.assert_frame_equal(result, expected)
result = datetime_series.agg(lambda x: Series([x, x ** 2], index=["x", "x^2"]))
tm.assert_frame_equal(result, expected)
def test_replicate_describe(string_series):
# this also tests a result set that is all scalars
expected = string_series.describe()
result = string_series.apply(
{
"count": "count",
"mean": "mean",
"std": "std",
"min": "min",
"25%": lambda x: x.quantile(0.25),
"50%": "median",
"75%": lambda x: x.quantile(0.75),
"max": "max",
}
)
tm.assert_series_equal(result, expected)
def test_reduce(string_series):
# reductions with named functions
result = string_series.agg(["sum", "mean"])
expected = Series(
[string_series.sum(), string_series.mean()],
["sum", "mean"],
name=string_series.name,
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("how", ["agg", "apply"])
def test_non_callable_aggregates(how):
# test agg using non-callable series attributes
# GH 39116 - expand to apply
s = Series([1, 2, None])
# Calling agg w/ just a string arg same as calling s.arg
result = getattr(s, how)("size")
expected = s.size
assert result == expected
# test when mixed w/ callable reducers
result = getattr(s, how)(["size", "count", "mean"])
expected = Series({"size": 3.0, "count": 2.0, "mean": 1.5})
tm.assert_series_equal(result, expected)
def test_series_apply_no_suffix_index():
# GH36189
s = Series([4] * 3)
result = s.apply(["sum", lambda x: x.sum(), lambda x: x.sum()])
expected = Series([12, 12, 12], index=["sum", "<lambda>", "<lambda>"])
tm.assert_series_equal(result, expected)
def test_map(datetime_series):
index, data = tm.getMixedTypeDict()
source = Series(data["B"], index=data["C"])
target = Series(data["C"][:4], index=data["D"][:4])
merged = target.map(source)
for k, v in merged.items():
assert v == source[target[k]]
# input could be a dict
merged = target.map(source.to_dict())
for k, v in merged.items():
assert v == source[target[k]]
# function
result = datetime_series.map(lambda x: x * 2)
tm.assert_series_equal(result, datetime_series * 2)
# GH 10324
a = Series([1, 2, 3, 4])
b = Series(["even", "odd", "even", "odd"], dtype="category")
c = Series(["even", "odd", "even", "odd"])
exp = Series(["odd", "even", "odd", np.nan], dtype="category")
tm.assert_series_equal(a.map(b), exp)
exp = Series(["odd", "even", "odd", np.nan])
tm.assert_series_equal(a.map(c), exp)
a = Series(["a", "b", "c", "d"])
b = Series([1, 2, 3, 4], index=pd.CategoricalIndex(["b", "c", "d", "e"]))
c = Series([1, 2, 3, 4], index=Index(["b", "c", "d", "e"]))
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 1, 2, 3])
tm.assert_series_equal(a.map(c), exp)
a = Series(["a", "b", "c", "d"])
b = Series(
["B", "C", "D", "E"],
dtype="category",
index=pd.CategoricalIndex(["b", "c", "d", "e"]),
)
c = Series(["B", "C", "D", "E"], index=Index(["b", "c", "d", "e"]))
exp = Series(
pd.Categorical([np.nan, "B", "C", "D"], categories=["B", "C", "D", "E"])
)
tm.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, "B", "C", "D"])
tm.assert_series_equal(a.map(c), exp)
def test_map_empty(index):
if isinstance(index, MultiIndex):
pytest.skip("Initializing a Series from a MultiIndex is not supported")
s = Series(index)
result = s.map({})
expected = Series(np.nan, index=s.index)
tm.assert_series_equal(result, expected)
def test_map_compat():
# related GH 8024
s = Series([True, True, False], index=[1, 2, 3])
result = s.map({True: "foo", False: "bar"})
expected = Series(["foo", "foo", "bar"], index=[1, 2, 3])
tm.assert_series_equal(result, expected)
def test_map_int():
left = Series({"a": 1.0, "b": 2.0, "c": 3.0, "d": 4})
right = Series({1: 11, 2: 22, 3: 33})
assert left.dtype == np.float_
assert issubclass(right.dtype.type, np.integer)
merged = left.map(right)
assert merged.dtype == np.float_
assert isna(merged["d"])
assert not isna(merged["c"])
def test_map_type_inference():
s = Series(range(3))
s2 = s.map(lambda x: np.where(x == 0, 0, 1))
assert issubclass(s2.dtype.type, np.integer)
def test_map_decimal(string_series):
from decimal import Decimal
result = string_series.map(lambda x: Decimal(str(x)))
assert result.dtype == np.object_
assert isinstance(result[0], Decimal)
def test_map_na_exclusion():
s = Series([1.5, np.nan, 3, np.nan, 5])
result = s.map(lambda x: x * 2, na_action="ignore")
exp = s * 2
tm.assert_series_equal(result, exp)
def test_map_dict_with_tuple_keys():
"""
Due to new MultiIndex-ing behaviour in v0.14.0,
dicts with tuple keys passed to map were being
converted to a multi-index, preventing tuple values
from being mapped properly.
"""
# GH 18496
df = DataFrame({"a": [(1,), (2,), (3, 4), (5, 6)]})
label_mappings = {(1,): "A", (2,): "B", (3, 4): "A", (5, 6): "B"}
df["labels"] = df["a"].map(label_mappings)
df["expected_labels"] = Series(["A", "B", "A", "B"], index=df.index)
# All labels should be filled now
tm.assert_series_equal(df["labels"], df["expected_labels"], check_names=False)
def test_map_counter():
s = Series(["a", "b", "c"], index=[1, 2, 3])
counter = Counter()
counter["b"] = 5
counter["c"] += 1
result = s.map(counter)
expected = Series([0, 5, 1], index=[1, 2, 3])
tm.assert_series_equal(result, expected)
def test_map_defaultdict():
s = Series([1, 2, 3], index=["a", "b", "c"])
default_dict = defaultdict(lambda: "blank")
default_dict[1] = "stuff"
result = s.map(default_dict)
expected = Series(["stuff", "blank", "blank"], index=["a", "b", "c"])
tm.assert_series_equal(result, expected)
def test_map_dict_na_key():
# https://github.com/pandas-dev/pandas/issues/17648
# Checks that np.nan key is appropriately mapped
s = Series([1, 2, np.nan])
expected = Series(["a", "b", "c"])
result = s.map({1: "a", 2: "b", np.nan: "c"})
tm.assert_series_equal(result, expected)
def test_map_dict_subclass_with_missing():
"""
Test Series.map with a dictionary subclass that defines __missing__,
i.e. sets a default value (GH #15999).
"""
class DictWithMissing(dict):
def __missing__(self, key):
return "missing"
s = Series([1, 2, 3])
dictionary = DictWithMissing({3: "three"})
result = s.map(dictionary)
expected = Series(["missing", "missing", "three"])
tm.assert_series_equal(result, expected)
def test_map_dict_subclass_without_missing():
class DictWithoutMissing(dict):
pass
s = Series([1, 2, 3])
dictionary = DictWithoutMissing({3: "three"})
result = s.map(dictionary)
expected = Series([np.nan, np.nan, "three"])
tm.assert_series_equal(result, expected)
def test_map_abc_mapping(non_dict_mapping_subclass):
# https://github.com/pandas-dev/pandas/issues/29733
# Check collections.abc.Mapping support as mapper for Series.map
s = Series([1, 2, 3])
not_a_dictionary = non_dict_mapping_subclass({3: "three"})
result = s.map(not_a_dictionary)
expected = Series([np.nan, np.nan, "three"])
tm.assert_series_equal(result, expected)
def test_map_abc_mapping_with_missing(non_dict_mapping_subclass):
# https://github.com/pandas-dev/pandas/issues/29733
# Check collections.abc.Mapping support as mapper for Series.map
class NonDictMappingWithMissing(non_dict_mapping_subclass):
def __missing__(key):
return "missing"
s = Series([1, 2, 3])
not_a_dictionary = NonDictMappingWithMissing({3: "three"})
result = s.map(not_a_dictionary)
# __missing__ is a dict concept, not a Mapping concept,
# so it should not change the result!
expected = Series([np.nan, np.nan, "three"])
tm.assert_series_equal(result, expected)
def test_map_box():
vals = [pd.Timestamp("2011-01-01"), pd.Timestamp("2011-01-02")]
s = Series(vals)
assert s.dtype == "datetime64[ns]"
# boxed value must be Timestamp instance
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = Series(["Timestamp_1_None", "Timestamp_2_None"])
tm.assert_series_equal(res, exp)
vals = [
pd.Timestamp("2011-01-01", tz="US/Eastern"),
pd.Timestamp("2011-01-02", tz="US/Eastern"),
]
s = Series(vals)
assert s.dtype == "datetime64[ns, US/Eastern]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.day}_{x.tz}")
exp = Series(["Timestamp_1_US/Eastern", "Timestamp_2_US/Eastern"])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta("1 days"), pd.Timedelta("2 days")]
s = Series(vals)
assert s.dtype == "timedelta64[ns]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.days}")
exp = Series(["Timedelta_1", "Timedelta_2"])
tm.assert_series_equal(res, exp)
# period
vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
s = Series(vals)
assert s.dtype == "Period[M]"
res = s.apply(lambda x: f"{type(x).__name__}_{x.freqstr}")
exp = Series(["Period_M", "Period_M"])
tm.assert_series_equal(res, exp)
def test_map_categorical():
values = pd.Categorical(list("ABBABCD"), categories=list("DCBA"), ordered=True)
s = Series(values, name="XX", index=list("abcdefg"))
result = s.map(lambda x: x.lower())
exp_values = pd.Categorical(list("abbabcd"), categories=list("dcba"), ordered=True)
exp = Series(exp_values, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
tm.assert_categorical_equal(result.values, exp_values)
result = s.map(lambda x: "A")
exp = Series(["A"] * 7, name="XX", index=list("abcdefg"))
tm.assert_series_equal(result, exp)
assert result.dtype == object
def test_map_datetimetz():
values = pd.date_range("2011-01-01", "2011-01-02", freq="H").tz_localize(
"Asia/Tokyo"
)
s = Series(values, name="XX")
# keep tz
result = s.map(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range("2011-01-02", "2011-01-03", freq="H").tz_localize(
"Asia/Tokyo"
)
exp = Series(exp_values, name="XX")
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.map(lambda x: x.hour)
exp = Series(list(range(24)) + [0], name="XX", dtype=np.int64)
tm.assert_series_equal(result, exp)
# not vectorized
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = Series(["Asia/Tokyo"] * 25, name="XX")
tm.assert_series_equal(result, exp)
@pytest.mark.parametrize(
"vals,mapping,exp",
[
(list("abc"), {np.nan: "not NaN"}, [np.nan] * 3 + ["not NaN"]),
(list("abc"), {"a": "a letter"}, ["a letter"] + [np.nan] * 3),
(list(range(3)), {0: 42}, [42] + [np.nan] * 3),
],
)
def test_map_missing_mixed(vals, mapping, exp):
# GH20495
s = Series(vals + [np.nan])
result = s.map(mapping)
tm.assert_series_equal(result, Series(exp))
@pytest.mark.parametrize(
"dti,exp",
[
(
Series([1, 2], index=pd.DatetimeIndex([0, 31536000000])),
DataFrame(np.repeat([[1, 2]], 2, axis=0), dtype="int64"),
),
(
tm.makeTimeSeries(nper=30),
DataFrame(np.repeat([[1, 2]], 30, axis=0), dtype="int64"),
),
],
)
@pytest.mark.parametrize("aware", [True, False])
def test_apply_series_on_date_time_index_aware_series(dti, exp, aware):
# GH 25959
# Calling apply on a localized time series should not cause an error
if aware:
index = dti.tz_localize("UTC").index
else:
index = dti.index
result = Series(index).apply(lambda x: Series([1, 2]))
tm.assert_frame_equal(result, exp)
def test_apply_scalar_on_date_time_index_aware_series():
# GH 25959
# Calling apply on a localized time series should not cause an error
series = tm.makeTimeSeries(nper=30).tz_localize("UTC")
result = Series(series.index).apply(lambda x: 1)
tm.assert_series_equal(result, Series(np.ones(30), dtype="int64"))
def test_map_float_to_string_precision():
# GH 13228
ser = Series(1 / 3)
result = ser.map(lambda val: str(val)).to_dict()
expected = {0: "0.3333333333333333"}
assert result == expected
def test_apply_to_timedelta():
list_of_valid_strings = ["00:00:01", "00:00:02"]
a = pd.to_timedelta(list_of_valid_strings)
b = Series(list_of_valid_strings).apply(pd.to_timedelta)
# FIXME: dont leave commented-out
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
list_of_strings = ["00:00:01", np.nan, pd.NaT, pd.NaT]
a = pd.to_timedelta(list_of_strings) # noqa
with tm.assert_produces_warning(FutureWarning, match="Inferring timedelta64"):
ser = Series(list_of_strings)
b = ser.apply(pd.to_timedelta) # noqa
# Can't compare until apply on a Series gives the correct dtype
# assert_series_equal(a, b)
@pytest.mark.parametrize(
"ops, names",
[
([np.sum], ["sum"]),
([np.sum, np.mean], ["sum", "mean"]),
(np.array([np.sum]), ["sum"]),
(np.array([np.sum, np.mean]), ["sum", "mean"]),
],
)
@pytest.mark.parametrize("how", ["agg", "apply"])
def test_apply_listlike_reducer(string_series, ops, names, how):
# GH 39140
expected = Series({name: op(string_series) for name, op in zip(names, ops)})
expected.name = "series"
result = getattr(string_series, how)(ops)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ops",
[
{"A": np.sum},
{"A": np.sum, "B": np.mean},
Series({"A": np.sum}),
Series({"A": np.sum, "B": np.mean}),
],
)
@pytest.mark.parametrize("how", ["agg", "apply"])
def test_apply_dictlike_reducer(string_series, ops, how):
# GH 39140
expected = Series({name: op(string_series) for name, op in ops.items()})
expected.name = string_series.name
result = getattr(string_series, how)(ops)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ops, names",
[
([np.sqrt], ["sqrt"]),
([np.abs, np.sqrt], ["absolute", "sqrt"]),
(np.array([np.sqrt]), ["sqrt"]),
(np.array([np.abs, np.sqrt]), ["absolute", "sqrt"]),
],
)
def test_apply_listlike_transformer(string_series, ops, names):
# GH 39140
with np.errstate(all="ignore"):
expected = concat([op(string_series) for op in ops], axis=1)
expected.columns = names
result = string_series.apply(ops)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"ops",
[
{"A": np.sqrt},
{"A": np.sqrt, "B": np.exp},
Series({"A": np.sqrt}),
Series({"A": np.sqrt, "B": np.exp}),
],
)
def test_apply_dictlike_transformer(string_series, ops):
# GH 39140
with np.errstate(all="ignore"):
expected = concat({name: op(string_series) for name, op in ops.items()})
expected.name = string_series.name
result = string_series.apply(ops)
tm.assert_series_equal(result, expected)
def test_apply_retains_column_name():
# GH 16380
df = DataFrame({"x": range(3)}, Index(range(3), name="x"))
result = df.x.apply(lambda x: Series(range(x + 1), Index(range(x + 1), name="y")))
expected = DataFrame(
[[0.0, np.nan, np.nan], [0.0, 1.0, np.nan], [0.0, 1.0, 2.0]],
columns=Index(range(3), name="y"),
index=Index(range(3), name="x"),
)
tm.assert_frame_equal(result, expected)
| {
"content_hash": "6d1b24eda05d8273ca94a591a8fb39d3",
"timestamp": "",
"source": "github",
"line_count": 892,
"max_line_length": 88,
"avg_line_length": 31.477578475336323,
"alnum_prop": 0.600683809388133,
"repo_name": "jorisvandenbossche/pandas",
"id": "1d0b64c1835dfd79bfcb18c8854d3d82272e9288",
"size": "28078",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pandas/tests/apply/test_series_apply.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "127"
},
{
"name": "C",
"bytes": "360342"
},
{
"name": "CSS",
"bytes": "1438"
},
{
"name": "Cython",
"bytes": "1083849"
},
{
"name": "Dockerfile",
"bytes": "1690"
},
{
"name": "HTML",
"bytes": "456275"
},
{
"name": "Makefile",
"bytes": "507"
},
{
"name": "Python",
"bytes": "17541583"
},
{
"name": "Shell",
"bytes": "10719"
},
{
"name": "Smarty",
"bytes": "7820"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
} |
import copy
import json
import os
from typing import Tuple
import pytest
from test_utilities.utils import assert_final_job_status, get_automl_job_properties
from azure.ai.ml import MLClient, automl
from azure.ai.ml.constants._common import AssetTypes
from azure.ai.ml.entities import Data
from azure.ai.ml.entities._inputs_outputs import Input
from azure.ai.ml.entities._job.automl import SearchSpace
from azure.ai.ml.entities._job.automl.image import ImageClassificationJob, ImageClassificationSearchSpace
from azure.ai.ml.operations._run_history_constants import JobStatus
from azure.ai.ml.sweep import BanditPolicy, Choice, Uniform
from devtools_testutils import AzureRecordedTestCase, is_live
@pytest.mark.automl_test
@pytest.mark.usefixtures("recorded_test")
@pytest.mark.skipif(
condition=not is_live(),
reason="Datasets downloaded by test are too large to record reliably"
)
class TestAutoMLImageClassification(AzureRecordedTestCase):
def _create_jsonl_multiclass(self, client, train_path, val_path):
src_images = "./fridgeObjects/"
train_validation_ratio = 5
# Path to the training and validation files
train_annotations_file = os.path.join(train_path, "train_annotations.jsonl")
validation_annotations_file = os.path.join(val_path, "validation_annotations.jsonl")
fridge_data = Data(
path="./fridgeObjects",
type=AssetTypes.URI_FOLDER,
)
data_path_uri = client.data.create_or_update(fridge_data)
# Baseline of json line dictionary
json_line_sample = {
"image_url": data_path_uri.path,
"label": "",
}
index = 0
        # Scan each sub directory and generate a jsonl line per image, distributed across train and validation JSONL files
with open(train_annotations_file, "w") as train_f:
with open(validation_annotations_file, "w") as validation_f:
for className in os.listdir(src_images):
subDir = src_images + className
if not os.path.isdir(subDir):
continue
                    # Scan each sub directory
print("Parsing " + subDir)
for image in os.listdir(subDir):
json_line = dict(json_line_sample)
json_line["image_url"] += f"{className}/{image}"
json_line["label"] = className
if index % train_validation_ratio == 0:
# validation annotation
validation_f.write(json.dumps(json_line) + "\n")
else:
# train annotation
train_f.write(json.dumps(json_line) + "\n")
index += 1
def test_image_classification_multiclass_run(
self, image_classification_dataset: Tuple[Input, Input], client: MLClient
) -> None:
# Note: this test launches two jobs in order to avoid calling the dataset fixture more than once. Ideally, it
# would have sufficed to mark the fixture with session scope, but pytest-xdist breaks this functionality:
# https://github.com/pytest-dev/pytest-xdist/issues/271.
# Get training and validation data paths
train_path, val_path = image_classification_dataset
# Create jsonl file
self._create_jsonl_multiclass(client=client, train_path=train_path, val_path=val_path)
training_data = Input(type=AssetTypes.MLTABLE, path=train_path)
validation_data = Input(type=AssetTypes.MLTABLE, path=val_path)
# Make generic classification job
image_classification_job = automl.image_classification(
training_data=training_data,
target_column_name="label",
validation_data=validation_data,
primary_metric="accuracy",
compute="gpu-cluster",
experiment_name="image-e2e-tests",
properties=get_automl_job_properties(),
)
# Configure regular sweep job
image_classification_job_sweep = copy.deepcopy(image_classification_job)
image_classification_job_sweep.set_training_parameters(early_stopping=True, evaluation_frequency=1)
image_classification_job_sweep.extend_search_space(
[
SearchSpace(
model_name=Choice(["vitb16r224"]),
learning_rate=Uniform(0.001, 0.01),
number_of_epochs=Choice([15, 30]),
),
SearchSpace(
model_name=Choice(["seresnext"]),
layers_to_freeze=Choice([0, 2]),
),
]
)
image_classification_job_sweep.set_limits(max_trials=1, max_concurrent_trials=1)
image_classification_job_sweep.set_sweep(
sampling_algorithm="Random",
early_termination=BanditPolicy(evaluation_interval=2, slack_factor=0.2, delay_evaluation=6),
)
# Configure AutoMode job
image_classification_job_automode = copy.deepcopy(image_classification_job)
# TODO: after shipping the AutoMode feature, do not set flag and call `set_limits()` instead of changing
# the limits object directly.
image_classification_job_automode.properties["enable_automode"] = True
image_classification_job_automode.limits.max_trials = 2
image_classification_job_automode.limits.max_concurrent_trials = 2
# Trigger regular sweep and then AutoMode job
submitted_job_sweep = client.jobs.create_or_update(image_classification_job_sweep)
submitted_job_automode = client.jobs.create_or_update(image_classification_job_automode)
# Assert completion of regular sweep job
assert_final_job_status(submitted_job_sweep, client, ImageClassificationJob, JobStatus.COMPLETED, deadline=3600)
# Assert completion of Automode job
assert_final_job_status(submitted_job_automode, client, ImageClassificationJob, JobStatus.COMPLETED, deadline=3600)
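def _example_annotation_line():
    # A minimal sketch (not part of the original test module) of the JSONL annotation format
    # produced by _create_jsonl_multiclass above. The image URL and label are hypothetical
    # placeholder values, not real dataset paths.
    json_line = {
        "image_url": "azureml://example-datastore/fridgeObjects/some_class/some_image.jpg",
        "label": "some_class",
    }
    return json.dumps(json_line)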
| {
"content_hash": "6be0391399591ae773d458f670871cef",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 123,
"avg_line_length": 44.77372262773723,
"alnum_prop": 0.6371046625366807,
"repo_name": "Azure/azure-sdk-for-python",
"id": "1831c46eb28d32d86e70fc24fc93008233329379",
"size": "6315",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/ml/azure-ai-ml/tests/automl_job/e2etests/test_automl_image_classification.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from .base import Layout
DEFAULT_FLOAT_WM_TYPES = set([
'utility',
'notification',
'toolbar',
'splash',
'dialog',
])
DEFAULT_FLOAT_RULES = [
{"role": "About"},
]
class Floating(Layout):
"""
Floating layout, which does nothing with windows but handles focus order
"""
defaults = [
("border_focus", "#0000ff", "Border colour for the focused window."),
("border_normal", "#000000", "Border colour for un-focused windows."),
("border_width", 1, "Border width."),
("max_border_width", 0, "Border width for maximize."),
("fullscreen_border_width", 0, "Border width for fullscreen."),
("name", "floating", "Name of this layout."),
(
"auto_float_types",
DEFAULT_FLOAT_WM_TYPES,
"default wm types to automatically float"
),
]
def __init__(self, float_rules=None, **config):
"""
If you have certain apps that you always want to float you can provide
``float_rules`` to do so. ``float_rules`` is a list of
dictionaries containing some or all of the keys::
{'wname': WM_NAME, 'wmclass': WM_CLASS, 'role': WM_WINDOW_ROLE}
The keys must be specified as above. You only need one, but
you need to provide the value for it. When a new window is
        opened, its ``match`` method is called with each of these
rules. If one matches, the window will float. The following
will float gimp and skype::
float_rules=[dict(wmclass="skype"), dict(wmclass="gimp")]
Specify these in the ``floating_layout`` in your config.
"""
Layout.__init__(self, **config)
self.clients = []
self.focused = None
self.group = None
self.float_rules = float_rules or DEFAULT_FLOAT_RULES
self.add_defaults(Floating.defaults)
def match(self, win):
"""Used to default float some windows"""
if win.window.get_wm_type() in self.auto_float_types:
return True
for rule_dict in self.float_rules:
if win.match(**rule_dict):
return True
return False
def find_clients(self, group):
"""Find all clients belonging to a given group"""
return [c for c in self.clients if c.group is group]
def to_screen(self, group, new_screen):
"""Adjust offsets of clients within current screen"""
for win in self.find_clients(group):
if win.maximized:
win.maximized = True
elif win.fullscreen:
win.fullscreen = True
else:
# catch if the client hasn't been configured
try:
# By default, place window at same offset from top corner
new_x = new_screen.x + win.float_x
new_y = new_screen.y + win.float_y
except AttributeError:
# this will be handled in .configure()
pass
else:
# make sure window isn't off screen left/right...
new_x = min(new_x, new_screen.x + new_screen.width - win.width)
new_x = max(new_x, new_screen.x)
# and up/down
new_y = min(new_y, new_screen.y + new_screen.height - win.height)
new_y = max(new_y, new_screen.y)
win.x = new_x
win.y = new_y
win.group = new_screen.group
def focus_first(self, group=None):
if group is None:
clients = self.clients
else:
clients = self.find_clients(group)
if clients:
return clients[0]
def focus_next(self, win):
if win not in self.clients or win.group is None:
return
clients = self.find_clients(win.group)
idx = clients.index(win)
if len(clients) > idx + 1:
return clients[idx + 1]
def focus_last(self, group=None):
if group is None:
clients = self.clients
else:
clients = self.find_clients(group)
if clients:
return clients[-1]
def focus_previous(self, win):
if win not in self.clients or win.group is None:
return
clients = self.find_clients(win.group)
idx = clients.index(win)
if idx > 0:
return clients[idx - 1]
def focus(self, client):
self.focused = client
def blur(self):
self.focused = None
def configure(self, client, screen):
if client is self.focused:
bc = client.group.qtile.colorPixel(self.border_focus)
else:
bc = client.group.qtile.colorPixel(self.border_normal)
if client.maximized:
bw = self.max_border_width
elif client.fullscreen:
bw = self.fullscreen_border_width
else:
bw = self.border_width
# We definitely have a screen here, so let's be sure we'll float on screen
try:
client.float_x
client.float_y
except AttributeError:
# this window hasn't been placed before, let's put it in a sensible spot
x = screen.x + client.x % screen.width
# try to get right edge on screen (without moving the left edge off)
            x = min(x, screen.x + screen.width - client.width)
x = max(x, screen.x)
            # then update its position (`.place()` will take care of `.float_x`)
client.x = x
y = screen.y + client.y % screen.height
            y = min(y, screen.y + screen.height - client.height)
y = max(y, screen.y)
client.y = y
client.place(
client.x,
client.y,
client.width,
client.height,
bw,
bc
)
client.unhide()
def add(self, client):
self.clients.append(client)
self.focused = client
def remove(self, client):
if client not in self.clients:
return
next_focus = self.focus_next(client)
if client is self.focused:
self.blur()
self.clients.remove(client)
return next_focus
def info(self):
d = Layout.info(self)
d["clients"] = [c.name for c in self.clients]
return d
def cmd_next(self):
# This can't ever be called, but implement the abstract method
pass
def cmd_previous(self):
# This can't ever be called, but implement the abstract method
pass
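def _example_floating_config():
    # A minimal sketch (not part of the original module) of the ``float_rules`` usage described
    # in Floating.__init__ above; in a user's config.py the result would typically be assigned
    # to ``floating_layout``. The wmclass/role values are illustrative only.
    return Floating(float_rules=[
        dict(wmclass="skype"),
        dict(wmclass="gimp"),
        dict(role="About"),
    ])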
| {
"content_hash": "41090d5bb79d44e5f48ba69ffebfb955",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 85,
"avg_line_length": 32.091787439613526,
"alnum_prop": 0.5426765015806112,
"repo_name": "jdowner/qtile",
"id": "3663d9cb0197367030c9684b524302f4acdba84c",
"size": "8049",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "libqtile/layout/floating.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "3598"
},
{
"name": "Makefile",
"bytes": "1332"
},
{
"name": "Python",
"bytes": "919886"
},
{
"name": "Shell",
"bytes": "2833"
}
],
"symlink_target": ""
} |
"""
backend.agent_backend.impl_knot2
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Knot DNS agent backend
Create, update, delete zones locally on a Knot DNS resolver using the
knotc utility.
Supported Knot versions: >= 2.1, < 3
`Knot DNS 2 User documentation <../../admin/backends/knot2_agent.html>`_
.. WARNING::
Untested, do not use in production.
.. NOTE::
If the backend is killed during a configuration transaction it might be
required to manually abort the transaction with `sudo knotc conf-abort`
Configured in [service:agent:knot2]
"""
from oslo_concurrency import lockutils
from oslo_concurrency.processutils import ProcessExecutionError
from oslo_config import cfg
from oslo_log import log as logging
from designate.backend.agent_backend import base
from designate import exceptions
from designate.utils import execute
CFG_GROUP_NAME = 'backend:agent:knot2'
LOG = logging.getLogger(__name__)
# rootwrap requires a command name instead of full path
KNOTC_DEFAULT_PATH = 'knotc'
# TODO(Federico) on zone creation and update, agent.handler unnecessarily
# performs AXFR from MiniDNS to the Agent to populate the `zone` argument
# (needed by the Bind backend)
class Knot2Backend(base.AgentBackend):
__plugin_name__ = 'knot2'
__backend_status__ = 'untested'
_lock_name = 'knot2.lock'
def __init__(self, *a, **kw):
"""Configure the backend"""
super(Knot2Backend, self).__init__(*a, **kw)
self._knotc_cmd_name = cfg.CONF[CFG_GROUP_NAME].knotc_cmd_name
def start(self):
"""Start the backend"""
LOG.info("Started knot2 backend")
def _execute_knotc(self, *knotc_args, **kw):
"""Run the Knot client and check the output
:param expected_output: expected output (default: 'OK')
:type expected_output: str
:param expected_error: expected alternative output, will be \
logged as info(). Default: not set.
:type expected_error: str
"""
# Knotc returns "0" even on failure, we have to check for 'OK'
# https://gitlab.labs.nic.cz/labs/knot/issues/456
LOG.debug("Executing knotc with %r", knotc_args)
expected = kw.get('expected_output', 'OK')
expected_alt = kw.get('expected_error', None)
try:
out, err = execute(self._knotc_cmd_name, *knotc_args)
out = out.rstrip()
LOG.debug("Command output: %r", out)
if out != expected:
if expected_alt is not None and out == expected_alt:
LOG.info("Ignoring error: %r", out)
else:
raise ProcessExecutionError(stdout=out, stderr=err)
except ProcessExecutionError as e:
LOG.error("Command output: %(out)r Stderr: %(err)r",
{
'out': e.stdout,
'err': e.stderr
})
raise exceptions.Backend(e)
def _start_minidns_to_knot_axfr(self, zone_name):
"""Instruct Knot to request an AXFR from MiniDNS. No need to lock
or enter a configuration transaction.
"""
self._execute_knotc('zone-refresh', zone_name)
def _modify_zone(self, *knotc_args, **kw):
"""Create or delete a zone while locking, and within a
Knot transaction.
Knot supports only one config transaction at a time.
:raises: exceptions.Backend
"""
with lockutils.lock(self._lock_name):
self._execute_knotc('conf-begin')
try:
self._execute_knotc(*knotc_args, **kw)
# conf-diff can be used for debugging
# self._execute_knotc('conf-diff')
except Exception as e:
self._execute_knotc('conf-abort')
LOG.info("Zone change aborted: %r", e)
raise
else:
self._execute_knotc('conf-commit')
def find_zone_serial(self, zone_name):
"""Get serial from a zone by running knotc
:returns: serial (int or None)
:raises: exceptions.Backend
"""
zone_name = zone_name.rstrip('.')
LOG.debug("Finding %s", zone_name)
# Output example:
# [530336536.com.] type: slave | serial: 0 | next-event: idle |
# auto-dnssec: disabled]
try:
out, err = execute(self._knotc_cmd_name, 'zone-status', zone_name)
except ProcessExecutionError as e:
if 'no such zone' in e.stdout:
# Zone not found
return None
LOG.error("Command output: %(out)r Stderr: %(err)r",
{
'out': e.stdout,
'err': e.stderr
})
raise exceptions.Backend(e)
try:
serial = out.split('|')[1].split()[1]
return int(serial)
except Exception:
LOG.error("Unable to parse knotc output: %r", out)
raise exceptions.Backend("Unexpected knotc zone-status output")
def create_zone(self, zone):
"""Create a new Zone by executing knotc
Do not raise exceptions if the zone already exists.
:param zone: zone to be created
:type zone: raw pythondns Zone
"""
zone_name = zone.origin.to_text(omit_final_dot=True)
if isinstance(zone_name, bytes):
zone_name = zone_name.decode('utf-8')
LOG.debug("Creating %s", zone_name)
        # The zone might already be in place due to a race condition between
# checking if the zone is there and creating it across different
# greenlets
self._modify_zone('conf-set', 'zone[%s]' % zone_name,
expected_error='duplicate identifier')
LOG.debug("Triggering initial AXFR from MiniDNS to Knot for %s",
zone_name)
self._start_minidns_to_knot_axfr(zone_name)
def update_zone(self, zone):
"""Instruct Knot DNS to perform AXFR from MiniDNS
:param zone: zone to be created
:type zone: raw pythondns Zone
"""
zone_name = zone.origin.to_text(omit_final_dot=True)
if isinstance(zone_name, bytes):
zone_name = zone_name.decode('utf-8')
LOG.debug("Triggering AXFR from MiniDNS to Knot for %s", zone_name)
self._start_minidns_to_knot_axfr(zone_name)
def delete_zone(self, zone_name):
"""Delete a new Zone by executing knotc
Do not raise exceptions if the zone does not exist.
:param zone_name: zone name
:type zone_name: str
"""
LOG.debug('Delete Zone: %s' % zone_name)
self._modify_zone('conf-unset', 'zone[%s]' % zone_name,
expected_error='invalid identifier')
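def _example_parse_zone_status():
    # A minimal sketch (not part of the original module) of how find_zone_serial above extracts
    # the serial from knotc 'zone-status' output; the sample line is hypothetical.
    out = "[example.com.] type: slave | serial: 7 | next-event: idle | auto-dnssec: disabled"
    return int(out.split('|')[1].split()[1])  # -> 7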
| {
"content_hash": "0a33d4c1187d790b32576fbf0e77154c",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 78,
"avg_line_length": 35.5625,
"alnum_prop": 0.5842120679554774,
"repo_name": "openstack/designate",
"id": "ca9b6b15f7a9344c4fb32f7700d974b9cc1fbd8b",
"size": "7499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "designate/backend/agent_backend/impl_knot2.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "71074"
},
{
"name": "Jinja",
"bytes": "2004"
},
{
"name": "Mako",
"bytes": "1012"
},
{
"name": "Python",
"bytes": "2442862"
},
{
"name": "Shell",
"bytes": "46200"
}
],
"symlink_target": ""
} |
"""
A collection of "vanilla" transforms for spatial operations
https://github.com/Project-MONAI/MONAI/wiki/MONAI_Design
"""
import warnings
from copy import deepcopy
from enum import Enum
from itertools import zip_longest
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import torch
from monai.config import USE_COMPILED, DtypeLike
from monai.config.type_definitions import NdarrayOrTensor
from monai.data.meta_obj import get_track_meta
from monai.data.meta_tensor import MetaTensor
from monai.data.utils import AFFINE_TOL, affine_to_spacing, compute_shape_offset, iter_patch, to_affine_nd, zoom_affine
from monai.networks.layers import AffineTransform, GaussianFilter, grid_pull
from monai.networks.utils import meshgrid_ij, normalize_transform
from monai.transforms.croppad.array import CenterSpatialCrop, ResizeWithPadOrCrop
from monai.transforms.intensity.array import GaussianSmooth
from monai.transforms.inverse import InvertibleTransform
from monai.transforms.transform import Randomizable, RandomizableTransform, Transform
from monai.transforms.utils import (
convert_pad_mode,
create_control_grid,
create_grid,
create_rotate,
create_scale,
create_shear,
create_translate,
map_spatial_axes,
scale_affine,
)
from monai.transforms.utils_pytorch_numpy_unification import allclose, linalg_inv, moveaxis, where
from monai.utils import (
GridSampleMode,
GridSamplePadMode,
InterpolateMode,
NdimageMode,
NumpyPadMode,
SplineMode,
convert_to_cupy,
convert_to_dst_type,
convert_to_numpy,
convert_to_tensor,
ensure_tuple,
ensure_tuple_rep,
ensure_tuple_size,
fall_back_tuple,
issequenceiterable,
optional_import,
pytorch_after,
)
from monai.utils.deprecate_utils import deprecated_arg
from monai.utils.enums import GridPatchSort, PytorchPadMode, TraceKeys, TransformBackends, WSIPatchKeys
from monai.utils.misc import ImageMetaKey as Key
from monai.utils.module import look_up_option
from monai.utils.type_conversion import convert_data_type, get_equivalent_dtype, get_torch_dtype_from_string
nib, has_nib = optional_import("nibabel")
cupy, _ = optional_import("cupy")
cupy_ndi, _ = optional_import("cupyx.scipy.ndimage")
np_ndi, _ = optional_import("scipy.ndimage")
__all__ = [
"SpatialResample",
"ResampleToMatch",
"Spacing",
"Orientation",
"Flip",
"GridDistortion",
"GridSplit",
"GridPatch",
"RandGridPatch",
"Resize",
"Rotate",
"Zoom",
"Rotate90",
"RandRotate90",
"RandRotate",
"RandFlip",
"RandGridDistortion",
"RandAxisFlip",
"RandZoom",
"AffineGrid",
"RandAffineGrid",
"RandDeformGrid",
"Resample",
"Affine",
"RandAffine",
"Rand2DElastic",
"Rand3DElastic",
]
RandRange = Optional[Union[Sequence[Union[Tuple[float, float], float]], float]]
class SpatialResample(InvertibleTransform):
"""
Resample input image from the orientation/spacing defined by ``src_affine`` affine matrix into
the ones specified by ``dst_affine`` affine matrix.
Internally this transform computes the affine transform matrix from ``src_affine`` to ``dst_affine``,
by ``xform = linalg.solve(src_affine, dst_affine)``, and call ``monai.transforms.Affine`` with ``xform``.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY, TransformBackends.CUPY]
def __init__(
self,
mode: Union[str, int] = GridSampleMode.BILINEAR,
padding_mode: str = GridSamplePadMode.BORDER,
align_corners: bool = False,
dtype: DtypeLike = np.float64,
):
"""
Args:
mode: {``"bilinear"``, ``"nearest"``} or spline interpolation order 0-5 (integers).
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When it's an integer, the numpy (cpu tensor)/cupy (cuda tensor) backends will be used
and the value represents the order of the spline interpolation.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `mode` is an integer, using numpy/cupy backends, this argument accepts
{'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', 'mirror', 'grid-wrap', 'wrap'}.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
dtype: data type for resampling computation. Defaults to ``float64`` for best precision.
If ``None``, use the data type of input data. To be compatible with other modules,
the output data type is always ``float32``.
"""
self.mode = mode
self.padding_mode = padding_mode
self.align_corners = align_corners
self.dtype = dtype
def _post_process(
self,
img: torch.Tensor,
src_affine: torch.Tensor,
dst_affine: torch.Tensor,
mode,
padding_mode,
align_corners,
original_spatial_shape,
) -> torch.Tensor:
"""
Small fn to simplify returning data. If `MetaTensor`, update affine. Elif
tracking metadata is desired, create `MetaTensor` with affine. Else, return
image as `torch.Tensor`. Output type is always `float32`.
Also append the transform to the stack.
"""
dtype = img.dtype
img = convert_to_tensor(img, track_meta=get_track_meta(), dtype=torch.float32)
if get_track_meta():
self.update_meta(img, dst_affine)
self.push_transform(
img,
extra_info={
"dtype": str(dtype)[6:], # dtype as string; remove "torch": torch.float32 -> float32
"mode": mode.value if isinstance(mode, Enum) else mode,
"padding_mode": padding_mode.value if isinstance(padding_mode, Enum) else padding_mode,
"align_corners": align_corners if align_corners is not None else TraceKeys.NONE,
"src_affine": src_affine,
},
orig_size=original_spatial_shape,
)
return img
def update_meta(self, img, dst_affine):
img.affine = dst_affine
@deprecated_arg(
name="src_affine", since="0.9", msg_suffix="img should be `MetaTensor`, so affine can be extracted directly."
)
def __call__(
self,
img: torch.Tensor,
src_affine: Optional[NdarrayOrTensor] = None,
dst_affine: Optional[torch.Tensor] = None,
spatial_size: Optional[Union[Sequence[int], torch.Tensor, int]] = None,
mode: Union[str, int, None] = None,
padding_mode: Optional[str] = None,
align_corners: Optional[bool] = None,
dtype: DtypeLike = None,
) -> torch.Tensor:
"""
Args:
img: input image to be resampled. It currently supports channel-first arrays with
at most three spatial dimensions.
dst_affine: destination affine matrix. Defaults to ``None``, which means the same as `img.affine`.
the shape should be `(r+1, r+1)` where `r` is the spatial rank of ``img``.
when `dst_affine` and `spatial_size` are None, the input will be returned without resampling,
but the data type will be `float32`.
spatial_size: output image spatial size.
if `spatial_size` and `self.spatial_size` are not defined,
the transform will compute a spatial size automatically containing the previous field of view.
                if `spatial_size` is ``-1``, the transform will use the corresponding input img size.
mode: {``"bilinear"``, ``"nearest"``} or spline interpolation order 0-5 (integers).
Interpolation mode to calculate output values. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When it's an integer, the numpy (cpu tensor)/cupy (cuda tensor) backends will be used
and the value represents the order of the spline interpolation.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``self.padding_mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `mode` is an integer, using numpy/cupy backends, this argument accepts
{'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', 'mirror', 'grid-wrap', 'wrap'}.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
Defaults to ``None``, effectively using the value of `self.align_corners`.
dtype: data type for resampling computation. Defaults to ``self.dtype`` or
``np.float64`` (for best precision). If ``None``, use the data type of input data.
To be compatible with other modules, the output data type is always `float32`.
The spatial rank is determined by the smallest among ``img.ndim -1``, ``len(src_affine) - 1``, and ``3``.
When both ``monai.config.USE_COMPILED`` and ``align_corners`` are set to ``True``,
MONAI's resampling implementation will be used.
Set `dst_affine` and `spatial_size` to `None` to turn off the resampling step.
"""
# get dtype as torch (e.g., torch.float64)
_dtype = get_equivalent_dtype(dtype or self.dtype or img.dtype, torch.Tensor)
align_corners = self.align_corners if align_corners is None else align_corners
mode = mode if mode is not None else self.mode
padding_mode = padding_mode if padding_mode is not None else self.padding_mode
original_spatial_shape = img.shape[1:]
src_affine_: torch.Tensor = img.affine if isinstance(img, MetaTensor) else torch.eye(4)
img = convert_to_tensor(data=img, track_meta=get_track_meta(), dtype=_dtype)
spatial_rank = min(len(img.shape) - 1, src_affine_.shape[0] - 1, 3)
if (not isinstance(spatial_size, int) or spatial_size != -1) and spatial_size is not None:
spatial_rank = min(len(ensure_tuple(spatial_size)), 3) # infer spatial rank based on spatial_size
src_affine_ = to_affine_nd(spatial_rank, src_affine_).to(_dtype)
dst_affine = to_affine_nd(spatial_rank, dst_affine) if dst_affine is not None else src_affine_
dst_affine = convert_to_dst_type(dst_affine, src_affine_)[0]
if not isinstance(dst_affine, torch.Tensor):
raise ValueError(f"dst_affine should be a torch.Tensor, got {type(dst_affine)}")
in_spatial_size = torch.tensor(img.shape[1 : spatial_rank + 1])
if isinstance(spatial_size, int) and (spatial_size == -1): # using the input spatial size
spatial_size = in_spatial_size
elif spatial_size is None and spatial_rank > 1: # auto spatial size
spatial_size, _ = compute_shape_offset(in_spatial_size, src_affine_, dst_affine) # type: ignore
spatial_size = torch.tensor(fall_back_tuple(ensure_tuple(spatial_size)[:spatial_rank], in_spatial_size))
if (
allclose(src_affine_, dst_affine, atol=AFFINE_TOL)
and allclose(spatial_size, in_spatial_size)
or spatial_rank == 1
):
# no significant change, return original image
return self._post_process(
img, src_affine_, src_affine_, mode, padding_mode, align_corners, original_spatial_shape
)
try:
_s = convert_to_tensor(src_affine_, track_meta=False, device=torch.device("cpu"))
_d = convert_to_tensor(dst_affine, track_meta=False, device=torch.device("cpu"))
xform = (
torch.linalg.solve(_s, _d) if pytorch_after(1, 8, 0) else torch.solve(_d, _s).solution # type: ignore
)
except (np.linalg.LinAlgError, RuntimeError) as e:
raise ValueError("src affine is not invertible.") from e
xform = to_affine_nd(spatial_rank, xform).to(device=img.device, dtype=_dtype)
# no resampling if it's identity transform
if allclose(xform, torch.eye(len(xform)), atol=AFFINE_TOL) and allclose(spatial_size, in_spatial_size):
return self._post_process(
img, src_affine_, src_affine_, mode, padding_mode, align_corners, original_spatial_shape
)
in_spatial_size = in_spatial_size.tolist() # type: ignore
chns, additional_dims = img.shape[0], img.shape[spatial_rank + 1 :] # beyond three spatial dims
if additional_dims:
xform_shape = [-1] + in_spatial_size
img = img.reshape(xform_shape) # type: ignore
if isinstance(mode, int):
dst_xform_1 = normalize_transform(spatial_size, xform.device, xform.dtype, True, True)[0] # to (-1, 1)
if not align_corners:
norm = create_scale(spatial_rank, [(max(d, 2) - 1) / d for d in spatial_size], xform.device, "torch")
dst_xform_1 = norm.to(xform.dtype) @ dst_xform_1 # type: ignore # scaling (num_step - 1) / num_step
dst_xform_d = normalize_transform(spatial_size, xform.device, xform.dtype, align_corners, False)[0]
xform = xform @ torch.inverse(dst_xform_d) @ dst_xform_1
affine_xform = Affine(
affine=xform, spatial_size=spatial_size, normalized=True, image_only=True, dtype=_dtype
)
with affine_xform.trace_transform(False):
img = affine_xform(img, mode=mode, padding_mode=padding_mode)
else:
affine_xform = AffineTransform(
normalized=False,
mode=mode,
padding_mode=padding_mode,
align_corners=align_corners,
reverse_indexing=True,
)
img = affine_xform(img.unsqueeze(0), theta=xform, spatial_size=spatial_size).squeeze(0)
if additional_dims:
full_shape = (chns, *spatial_size, *additional_dims)
img = img.reshape(full_shape)
return self._post_process(
img, src_affine_, dst_affine, mode, padding_mode, align_corners, original_spatial_shape
)
def inverse(self, data: torch.Tensor) -> torch.Tensor:
transform = self.pop_transform(data)
# Create inverse transform
kw_args = transform[TraceKeys.EXTRA_INFO]
# need to convert dtype from string back to torch.dtype
kw_args["dtype"] = get_torch_dtype_from_string(kw_args["dtype"])
# source becomes destination
kw_args["dst_affine"] = kw_args.pop("src_affine")
kw_args["spatial_size"] = transform[TraceKeys.ORIG_SIZE]
if kw_args.get("align_corners") == TraceKeys.NONE:
kw_args["align_corners"] = False
with self.trace_transform(False):
# we can't use `self.__call__` in case a child class calls this inverse.
out: torch.Tensor = SpatialResample.__call__(self, data, **kw_args)
return out
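def _example_affine_solve():
    # A minimal sketch (not part of the original module) of the core relation described in the
    # SpatialResample docstring, xform = linalg.solve(src_affine, dst_affine), here for an
    # identity source affine and an isotropic 2x destination spacing.
    src = torch.eye(4, dtype=torch.float64)
    dst = torch.diag(torch.tensor([2.0, 2.0, 2.0, 1.0], dtype=torch.float64))
    return torch.linalg.solve(src, dst)  # diag(2, 2, 2, 1): maps source to destination coordinates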
class ResampleToMatch(SpatialResample):
"""Resample an image to match given metadata. The affine matrix will be aligned,
and the size of the output image will match."""
def update_meta(self, img: torch.Tensor, dst_affine=None, img_dst=None):
if dst_affine is not None:
super().update_meta(img, dst_affine)
if isinstance(img_dst, MetaTensor) and isinstance(img, MetaTensor):
original_fname = img.meta[Key.FILENAME_OR_OBJ]
img.meta = deepcopy(img_dst.meta)
img.meta[Key.FILENAME_OR_OBJ] = original_fname # keep the original name, the others are overwritten
@deprecated_arg(
name="src_meta", since="0.9", msg_suffix="img should be `MetaTensor`, so affine can be extracted directly."
)
@deprecated_arg(
name="dst_meta", since="0.9", msg_suffix="img_dst should be `MetaTensor`, so affine can be extracted directly."
)
def __call__(
self,
img: torch.Tensor,
img_dst: torch.Tensor,
src_meta: Optional[Dict] = None,
dst_meta: Optional[Dict] = None,
mode: Union[str, int, None] = None,
padding_mode: Optional[str] = None,
align_corners: Optional[bool] = None,
dtype: DtypeLike = None,
) -> torch.Tensor:
"""
Args:
img: input image to be resampled to match ``dst_meta``. It currently supports channel-first arrays with
at most three spatial dimensions.
src_meta: Dictionary containing the source affine matrix in the form ``{'affine':src_affine}``.
If ``affine`` is not specified, an identity matrix is assumed. Defaults to ``None``.
See also: https://docs.monai.io/en/stable/transforms.html#spatialresample
dst_meta: Dictionary containing the target affine matrix and target spatial shape in the form
``{'affine':src_affine, 'spatial_shape':spatial_size}``. If ``affine`` is not
specified, ``src_affine`` is assumed. If ``spatial_shape`` is not specified, spatial size is
automatically computed, containing the previous field of view. Defaults to ``None``.
See also: https://docs.monai.io/en/stable/transforms.html#spatialresample
mode: {``"bilinear"``, ``"nearest"``} or spline interpolation order 0-5 (integers).
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When it's an integer, the numpy (cpu tensor)/cupy (cuda tensor) backends will be used
and the value represents the order of the spline interpolation.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `mode` is an integer, using numpy/cupy backends, this argument accepts
{'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', 'mirror', 'grid-wrap', 'wrap'}.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
Defaults to ``None``, effectively using the value of `self.align_corners`.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
dtype: data type for resampling computation. Defaults to ``self.dtype`` or
``np.float64`` (for best precision). If ``None``, use the data type of input data.
To be compatible with other modules, the output data type is always `float32`.
Raises:
            RuntimeError: When ``img_dst`` is missing.
ValueError: When the affine matrix of the source image is not invertible.
Returns:
Resampled input tensor or MetaTensor.
"""
if img_dst is None:
raise RuntimeError("`img_dst` is missing.")
dst_affine = img_dst.affine if isinstance(img_dst, MetaTensor) else torch.eye(4)
img = super().__call__(
img=img,
dst_affine=dst_affine,
spatial_size=img_dst.shape[1:], # skip channel
mode=mode,
padding_mode=padding_mode,
align_corners=align_corners,
dtype=dtype,
)
self.update_meta(img, dst_affine=dst_affine, img_dst=img_dst)
return img
class Spacing(InvertibleTransform):
"""
Resample input image into the specified `pixdim`.
"""
backend = SpatialResample.backend
@deprecated_arg(name="image_only", since="0.9")
def __init__(
self,
pixdim: Union[Sequence[float], float, np.ndarray],
diagonal: bool = False,
mode: Union[str, int] = GridSampleMode.BILINEAR,
padding_mode: str = GridSamplePadMode.BORDER,
align_corners: bool = False,
dtype: DtypeLike = np.float64,
scale_extent: bool = False,
recompute_affine: bool = False,
min_pixdim: Union[Sequence[float], float, np.ndarray, None] = None,
max_pixdim: Union[Sequence[float], float, np.ndarray, None] = None,
image_only: bool = False,
) -> None:
"""
Args:
pixdim: output voxel spacing. if providing a single number, will use it for the first dimension.
items of the pixdim sequence map to the spatial dimensions of input image, if length
of pixdim sequence is longer than image spatial dimensions, will ignore the longer part,
if shorter, will pad with the last value. For example, for 3D image if pixdim is [1.0, 2.0] it
will be padded to [1.0, 2.0, 2.0]
if the components of the `pixdim` are non-positive values, the transform will use the
corresponding components of the original pixdim, which is computed from the `affine`
matrix of input image.
diagonal: whether to resample the input to have a diagonal affine matrix.
If True, the input data is resampled to the following affine::
np.diag((pixdim_0, pixdim_1, ..., pixdim_n, 1))
This effectively resets the volume to the world coordinate system (RAS+ in nibabel).
The original orientation, rotation, shearing are not preserved.
If False, this transform preserves the axes orientation, orthogonal rotation and
translation components from the original affine. This option will not flip/swap axes
of the original data.
mode: {``"bilinear"``, ``"nearest"``} or spline interpolation order 0-5 (integers).
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When it's an integer, the numpy (cpu tensor)/cupy (cuda tensor) backends will be used
and the value represents the order of the spline interpolation.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `mode` is an integer, using numpy/cupy backends, this argument accepts
{'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', 'mirror', 'grid-wrap', 'wrap'}.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
dtype: data type for resampling computation. Defaults to ``float64`` for best precision.
If None, use the data type of input data. To be compatible with other modules,
the output data type is always ``float32``.
scale_extent: whether the scale is computed based on the spacing or the full extent of voxels,
default False. The option is ignored if output spatial size is specified when calling this transform.
See also: :py:func:`monai.data.utils.compute_shape_offset`. When this is True, `align_corners`
should be `True` because `compute_shape_offset` already provides the corner alignment shift/scaling.
recompute_affine: whether to recompute affine based on the output shape. The affine computed
analytically does not reflect the potential quantization errors in terms of the output shape.
Set this flag to True to recompute the output affine based on the actual pixdim. Default to ``False``.
min_pixdim: minimal input spacing to be resampled. If provided, input image with a larger spacing than this
value will be kept in its original spacing (not be resampled to `pixdim`). Set it to `None` to use the
value of `pixdim`. Default to `None`.
max_pixdim: maximal input spacing to be resampled. If provided, input image with a smaller spacing than this
value will be kept in its original spacing (not be resampled to `pixdim`). Set it to `None` to use the
value of `pixdim`. Default to `None`.
"""
self.pixdim = np.array(ensure_tuple(pixdim), dtype=np.float64)
self.min_pixdim = np.array(ensure_tuple(min_pixdim), dtype=np.float64)
self.max_pixdim = np.array(ensure_tuple(max_pixdim), dtype=np.float64)
self.diagonal = diagonal
self.scale_extent = scale_extent
self.recompute_affine = recompute_affine
for mn, mx in zip(self.min_pixdim, self.max_pixdim):
if (not np.isnan(mn)) and (not np.isnan(mx)) and ((mx < mn) or (mn < 0)):
raise ValueError(f"min_pixdim {self.min_pixdim} must be positive, smaller than max {self.max_pixdim}.")
self.sp_resample = SpatialResample(
mode=mode, padding_mode=padding_mode, align_corners=align_corners, dtype=dtype
)
@deprecated_arg(name="affine", since="0.9", msg_suffix="Not needed, input should be `MetaTensor`.")
def __call__(
self,
data_array: torch.Tensor,
affine: Optional[NdarrayOrTensor] = None,
mode: Union[str, int, None] = None,
padding_mode: Optional[str] = None,
align_corners: Optional[bool] = None,
dtype: DtypeLike = None,
scale_extent: Optional[bool] = None,
output_spatial_shape: Optional[Union[Sequence[int], np.ndarray, int]] = None,
) -> torch.Tensor:
"""
Args:
data_array: in shape (num_channels, H[, W, ...]).
mode: {``"bilinear"``, ``"nearest"``} or spline interpolation order 0-5 (integers).
Interpolation mode to calculate output values. Defaults to ``"self.mode"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When it's an integer, the numpy (cpu tensor)/cupy (cuda tensor) backends will be used
and the value represents the order of the spline interpolation.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"self.padding_mode"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `mode` is an integer, using numpy/cupy backends, this argument accepts
{'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', 'mirror', 'grid-wrap', 'wrap'}.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
Defaults to ``None``, effectively using the value of `self.align_corners`.
dtype: data type for resampling computation. Defaults to ``self.dtype``.
If None, use the data type of input data. To be compatible with other modules,
the output data type is always ``float32``.
scale_extent: whether the scale is computed based on the spacing or the full extent of voxels,
The option is ignored if output spatial size is specified when calling this transform.
See also: :py:func:`monai.data.utils.compute_shape_offset`. When this is True, `align_corners`
should be `True` because `compute_shape_offset` already provides the corner alignment shift/scaling.
output_spatial_shape: specify the shape of the output data_array. This is typically useful for
the inverse of `Spacingd` where sometimes we could not compute the exact shape due to the quantization
error with the affine.
Raises:
ValueError: When ``data_array`` has no spatial dimensions.
ValueError: When ``pixdim`` is nonpositive.
Returns:
data tensor or MetaTensor (resampled into `self.pixdim`).
"""
original_spatial_shape = data_array.shape[1:]
sr = len(original_spatial_shape)
if sr <= 0:
raise ValueError("data_array must have at least one spatial dimension.")
affine_: np.ndarray
if affine is not None:
warnings.warn("arg `affine` is deprecated, the affine of MetaTensor in data_array has higher priority.")
input_affine = data_array.affine if isinstance(data_array, MetaTensor) else affine
if input_affine is None:
warnings.warn("`data_array` is not of type MetaTensor, assuming affine to be identity.")
# default to identity
input_affine = np.eye(sr + 1, dtype=np.float64)
affine_ = to_affine_nd(sr, convert_data_type(input_affine, np.ndarray)[0])
out_d = self.pixdim[:sr]
if out_d.size < sr:
out_d = np.append(out_d, [out_d[-1]] * (sr - out_d.size))
orig_d = affine_to_spacing(affine_, sr, out_d.dtype)
for idx, (_d, mn, mx) in enumerate(
zip_longest(orig_d, self.min_pixdim[:sr], self.max_pixdim[:sr], fillvalue=np.nan)
):
target = out_d[idx]
mn = target if np.isnan(mn) else min(mn, target)
mx = target if np.isnan(mx) else max(mx, target)
if mn > mx:
raise ValueError(f"min_pixdim is larger than max_pixdim at dim {idx}: min {mn} max {mx} out {target}.")
out_d[idx] = _d if (mn - AFFINE_TOL) <= _d <= (mx + AFFINE_TOL) else target
if not align_corners and scale_extent:
warnings.warn("align_corners=False is not compatible with scale_extent=True.")
# compute output affine, shape and offset
new_affine = zoom_affine(affine_, out_d, diagonal=self.diagonal)
scale_extent = self.scale_extent if scale_extent is None else scale_extent
output_shape, offset = compute_shape_offset(data_array.shape[1:], affine_, new_affine, scale_extent)
new_affine[:sr, -1] = offset[:sr]
# convert to MetaTensor if necessary
data_array = convert_to_tensor(data_array, track_meta=get_track_meta())
if isinstance(data_array, MetaTensor):
data_array.affine = torch.as_tensor(affine_)
# we don't want to track the nested transform otherwise two will be appended
actual_shape = list(output_shape) if output_spatial_shape is None else output_spatial_shape
data_array = self.sp_resample(
data_array,
dst_affine=torch.as_tensor(new_affine),
spatial_size=actual_shape,
mode=mode,
padding_mode=padding_mode,
align_corners=align_corners,
dtype=dtype,
)
if self.recompute_affine and isinstance(data_array, MetaTensor):
data_array.affine = scale_affine(affine_, original_spatial_shape, actual_shape)
return data_array
def inverse(self, data: torch.Tensor) -> torch.Tensor:
return self.sp_resample.inverse(data)
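def _example_pixdim_padding():
    # A minimal sketch (not part of the original module) of the pixdim padding rule documented
    # in Spacing: a 2-element pixdim for a 3D image is padded with its last value to [1., 2., 2.].
    out_d = np.array([1.0, 2.0])
    sr = 3  # spatial rank of a 3D image
    if out_d.size < sr:
        out_d = np.append(out_d, [out_d[-1]] * (sr - out_d.size))
    return out_d  # array([1., 2., 2.])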
class Orientation(InvertibleTransform):
"""
Change the input image's orientation into the specified based on `axcodes`.
"""
backend = [TransformBackends.NUMPY, TransformBackends.TORCH]
@deprecated_arg(name="image_only", since="0.9")
def __init__(
self,
axcodes: Optional[str] = None,
as_closest_canonical: bool = False,
labels: Optional[Sequence[Tuple[str, str]]] = (("L", "R"), ("P", "A"), ("I", "S")),
image_only: bool = False,
) -> None:
"""
Args:
axcodes: N elements sequence for spatial ND input's orientation.
e.g. axcodes='RAS' represents 3D orientation:
(Left, Right), (Posterior, Anterior), (Inferior, Superior).
default orientation labels options are: 'L' and 'R' for the first dimension,
'P' and 'A' for the second, 'I' and 'S' for the third.
as_closest_canonical: if True, load the image as closest to canonical axis format.
labels: optional, None or sequence of (2,) sequences
(2,) sequences are labels for (beginning, end) of output axis.
Defaults to ``(('L', 'R'), ('P', 'A'), ('I', 'S'))``.
Raises:
            ValueError: When ``axcodes=None`` and ``as_closest_canonical=False``. Incompatible values.
See Also: `nibabel.orientations.ornt2axcodes`.
"""
if axcodes is None and not as_closest_canonical:
raise ValueError("Incompatible values: axcodes=None and as_closest_canonical=True.")
if axcodes is not None and as_closest_canonical:
warnings.warn("using as_closest_canonical=True, axcodes ignored.")
self.axcodes = axcodes
self.as_closest_canonical = as_closest_canonical
self.labels = labels
def __call__(self, data_array: torch.Tensor) -> torch.Tensor:
"""
If input type is `MetaTensor`, original affine is extracted with `data_array.affine`.
If input type is `torch.Tensor`, original affine is assumed to be identity.
Args:
data_array: in shape (num_channels, H[, W, ...]).
Raises:
ValueError: When ``data_array`` has no spatial dimensions.
ValueError: When ``axcodes`` spatiality differs from ``data_array``.
Returns:
data_array [reoriented in `self.axcodes`]. Output type will be `MetaTensor`
unless `get_track_meta() == False`, in which case it will be
`torch.Tensor`.
"""
spatial_shape = data_array.shape[1:]
sr = len(spatial_shape)
if sr <= 0:
raise ValueError("data_array must have at least one spatial dimension.")
affine_: np.ndarray
affine_np: np.ndarray
if isinstance(data_array, MetaTensor):
affine_np, *_ = convert_data_type(data_array.affine, np.ndarray)
affine_ = to_affine_nd(sr, affine_np)
else:
warnings.warn("`data_array` is not of type `MetaTensor, assuming affine to be identity.")
# default to identity
affine_np = np.eye(sr + 1, dtype=np.float64)
affine_ = np.eye(sr + 1, dtype=np.float64)
src = nib.io_orientation(affine_)
if self.as_closest_canonical:
spatial_ornt = src
else:
if self.axcodes is None:
raise ValueError("Incompatible values: axcodes=None and as_closest_canonical=True.")
if sr < len(self.axcodes):
warnings.warn(
f"axcodes ('{self.axcodes}') length is smaller than the number of input spatial dimensions D={sr}.\n"
f"{self.__class__.__name__}: input spatial shape is {spatial_shape}, num. channels is {data_array.shape[0]},"
"please make sure the input is in the channel-first format."
)
dst = nib.orientations.axcodes2ornt(self.axcodes[:sr], labels=self.labels)
if len(dst) < sr:
raise ValueError(
f"axcodes must match data_array spatially, got axcodes={len(self.axcodes)}D data_array={sr}D"
)
spatial_ornt = nib.orientations.ornt_transform(src, dst)
new_affine = affine_ @ nib.orientations.inv_ornt_aff(spatial_ornt, spatial_shape)
# convert to MetaTensor if necessary
data_array = convert_to_tensor(data_array, track_meta=get_track_meta())
spatial_ornt[:, 0] += 1 # skip channel dim
spatial_ornt = np.concatenate([np.array([[0, 1]]), spatial_ornt])
axes = [ax for ax, flip in enumerate(spatial_ornt[:, 1]) if flip == -1]
if axes:
data_array = torch.flip(data_array, dims=axes)
full_transpose = np.arange(len(data_array.shape))
full_transpose[: len(spatial_ornt)] = np.argsort(spatial_ornt[:, 0])
if not np.all(full_transpose == np.arange(len(data_array.shape))):
data_array = data_array.permute(full_transpose.tolist())
new_affine = to_affine_nd(affine_np, new_affine)
new_affine, *_ = convert_data_type(new_affine, torch.Tensor, dtype=torch.float32, device=data_array.device)
if get_track_meta():
self.update_meta(data_array, new_affine)
self.push_transform(data_array, extra_info={"original_affine": affine_np})
return data_array
def update_meta(self, img, new_affine):
img.affine = new_affine
def inverse(self, data: torch.Tensor) -> torch.Tensor:
transform = self.pop_transform(data)
# Create inverse transform
orig_affine = transform[TraceKeys.EXTRA_INFO]["original_affine"]
orig_axcodes = nib.orientations.aff2axcodes(orig_affine)
inverse_transform = Orientation(axcodes=orig_axcodes, as_closest_canonical=False, labels=self.labels)
# Apply inverse
with inverse_transform.trace_transform(False):
data = inverse_transform(data)
return data
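def _example_axcodes_transform():
    # A minimal sketch (not part of the original module, requires nibabel) of how
    # Orientation.__call__ above derives the per-axis (index, flip) transform from axcodes.
    affine = np.diag([-1.0, -1.0, 1.0, 1.0])  # an LPS-oriented affine
    src = nib.io_orientation(affine)
    dst = nib.orientations.axcodes2ornt("RAS", labels=(("L", "R"), ("P", "A"), ("I", "S")))
    return nib.orientations.ornt_transform(src, dst)  # flips axes 0 and 1, keeps axis order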
class Flip(InvertibleTransform):
"""
Reverses the order of elements along the given spatial axis. Preserves shape.
See `torch.flip` documentation for additional details:
https://pytorch.org/docs/stable/generated/torch.flip.html
Args:
spatial_axis: spatial axes along which to flip over. Default is None.
The default `axis=None` will flip over all of the axes of the input array.
If axis is negative it counts from the last to the first axis.
If axis is a tuple of ints, flipping is performed on all of the axes
specified in the tuple.
"""
backend = [TransformBackends.TORCH]
def __init__(self, spatial_axis: Optional[Union[Sequence[int], int]] = None) -> None:
self.spatial_axis = spatial_axis
def update_meta(self, img, shape, axes):
# shape and axes include the channel dim
affine = img.affine
mat = convert_to_dst_type(torch.eye(len(affine)), affine)[0]
for axis in axes:
sp = axis - 1
mat[sp, sp], mat[sp, -1] = mat[sp, sp] * -1, shape[axis] - 1
img.affine = affine @ mat
def forward_image(self, img, axes) -> torch.Tensor:
return torch.flip(img, axes)
def __call__(self, img: torch.Tensor) -> torch.Tensor:
"""
Args:
img: channel first array, must have shape: (num_channels, H[, W, ..., ])
"""
img = convert_to_tensor(img, track_meta=get_track_meta())
axes = map_spatial_axes(img.ndim, self.spatial_axis)
out = self.forward_image(img, axes)
if get_track_meta():
self.update_meta(out, out.shape, axes)
self.push_transform(out)
return out
def inverse(self, data: torch.Tensor) -> torch.Tensor:
self.pop_transform(data)
flipper = Flip(spatial_axis=self.spatial_axis)
with flipper.trace_transform(False):
return flipper(data)
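def _example_flip_axis():
    # A minimal sketch (not part of the original module): Flip ultimately calls torch.flip on a
    # channel-first tensor, so spatial_axis=0 maps to tensor dim 1 and reverses the rows.
    img = torch.arange(6).reshape(1, 2, 3)  # (C=1, H=2, W=3)
    return torch.flip(img, dims=[1])  # shape preserved, order along H reversed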
class Resize(InvertibleTransform):
"""
Resize the input image to given spatial size (with scaling, not cropping/padding).
Implemented using :py:class:`torch.nn.functional.interpolate`.
Args:
spatial_size: expected shape of spatial dimensions after resize operation.
if some components of the `spatial_size` are non-positive values, the transform will use the
corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted
to `(32, 64)` if the second spatial dimension size of img is `64`.
size_mode: should be "all" or "longest", if "all", will use `spatial_size` for all the spatial dims,
if "longest", rescale the image so that only the longest side is equal to specified `spatial_size`,
which must be an int number in this case, keeping the aspect ratio of the initial image, refer to:
https://albumentations.ai/docs/api_reference/augmentations/geometric/resize/
#albumentations.augmentations.geometric.resize.LongestMaxSize.
mode: {``"nearest"``, ``"nearest-exact"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
The interpolation mode. Defaults to ``"area"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
align_corners: This only has an effect when mode is
'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
anti_aliasing: bool
Whether to apply a Gaussian filter to smooth the image prior
to downsampling. It is crucial to filter when downsampling
the image to avoid aliasing artifacts. See also ``skimage.transform.resize``
anti_aliasing_sigma: {float, tuple of floats}, optional
Standard deviation for Gaussian filtering used when anti-aliasing.
By default, this value is chosen as (s - 1) / 2 where s is the
downsampling factor, where s > 1. For the up-size case, s < 1, no
anti-aliasing is performed prior to rescaling.
"""
backend = [TransformBackends.TORCH]
def __init__(
self,
spatial_size: Union[Sequence[int], int],
size_mode: str = "all",
mode: str = InterpolateMode.AREA,
align_corners: Optional[bool] = None,
anti_aliasing: bool = False,
anti_aliasing_sigma: Union[Sequence[float], float, None] = None,
) -> None:
self.size_mode = look_up_option(size_mode, ["all", "longest"])
self.spatial_size = spatial_size
self.mode: InterpolateMode = look_up_option(mode, InterpolateMode)
self.align_corners = align_corners
self.anti_aliasing = anti_aliasing
self.anti_aliasing_sigma = anti_aliasing_sigma
def __call__(
self,
img: torch.Tensor,
mode: Optional[str] = None,
align_corners: Optional[bool] = None,
anti_aliasing: Optional[bool] = None,
anti_aliasing_sigma: Union[Sequence[float], float, None] = None,
) -> torch.Tensor:
"""
Args:
img: channel first array, must have shape: (num_channels, H[, W, ..., ]).
mode: {``"nearest"``, ``"nearest-exact"``, ``"linear"``,
``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
The interpolation mode. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
align_corners: This only has an effect when mode is
'linear', 'bilinear', 'bicubic' or 'trilinear'. Defaults to ``self.align_corners``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
anti_aliasing: bool, optional
Whether to apply a Gaussian filter to smooth the image prior
to downsampling. It is crucial to filter when downsampling
the image to avoid aliasing artifacts. See also ``skimage.transform.resize``
anti_aliasing_sigma: {float, tuple of floats}, optional
Standard deviation for Gaussian filtering used when anti-aliasing.
By default, this value is chosen as (s - 1) / 2 where s is the
downsampling factor, where s > 1. For the up-size case, s < 1, no
anti-aliasing is performed prior to rescaling.
Raises:
ValueError: When ``self.spatial_size`` length is less than ``img`` spatial dimensions.
"""
anti_aliasing = self.anti_aliasing if anti_aliasing is None else anti_aliasing
anti_aliasing_sigma = self.anti_aliasing_sigma if anti_aliasing_sigma is None else anti_aliasing_sigma
input_ndim = img.ndim - 1 # spatial ndim
if self.size_mode == "all":
output_ndim = len(ensure_tuple(self.spatial_size))
if output_ndim > input_ndim:
input_shape = ensure_tuple_size(img.shape, output_ndim + 1, 1)
img = img.reshape(input_shape)
elif output_ndim < input_ndim:
raise ValueError(
"len(spatial_size) must be greater or equal to img spatial dimensions, "
f"got spatial_size={output_ndim} img={input_ndim}."
)
spatial_size_ = fall_back_tuple(self.spatial_size, img.shape[1:])
else: # for the "longest" mode
img_size = img.shape[1:]
if not isinstance(self.spatial_size, int):
raise ValueError("spatial_size must be an int number if size_mode is 'longest'.")
scale = self.spatial_size / max(img_size)
spatial_size_ = tuple(int(round(s * scale)) for s in img_size)
original_sp_size = img.shape[1:]
_mode = look_up_option(self.mode if mode is None else mode, InterpolateMode)
_align_corners = self.align_corners if align_corners is None else align_corners
if tuple(img.shape[1:]) == spatial_size_: # spatial shape is already the desired
img = convert_to_tensor(img, track_meta=get_track_meta())
return self._post_process(img, original_sp_size, spatial_size_, _mode, _align_corners, input_ndim)
img_ = convert_to_tensor(img, dtype=torch.float, track_meta=False)
if anti_aliasing and any(x < y for x, y in zip(spatial_size_, img_.shape[1:])):
factors = torch.div(torch.Tensor(list(img_.shape[1:])), torch.Tensor(spatial_size_))
if anti_aliasing_sigma is None:
# if sigma is not given, use the default sigma in skimage.transform.resize
anti_aliasing_sigma = torch.maximum(torch.zeros(factors.shape), (factors - 1) / 2).tolist()
else:
# if sigma is given, use the given value for downsampling axis
anti_aliasing_sigma = list(ensure_tuple_rep(anti_aliasing_sigma, len(spatial_size_)))
for axis in range(len(spatial_size_)):
anti_aliasing_sigma[axis] = anti_aliasing_sigma[axis] * int(factors[axis] > 1)
anti_aliasing_filter = GaussianSmooth(sigma=anti_aliasing_sigma)
img_ = convert_to_tensor(anti_aliasing_filter(img_), track_meta=False)
img = convert_to_tensor(img, track_meta=get_track_meta())
resized = torch.nn.functional.interpolate(
input=img_.unsqueeze(0), size=spatial_size_, mode=_mode, align_corners=_align_corners
)
out, *_ = convert_to_dst_type(resized.squeeze(0), img)
return self._post_process(out, original_sp_size, spatial_size_, _mode, _align_corners, input_ndim)
def _post_process(self, img: torch.Tensor, orig_size, sp_size, mode, align_corners, ndim) -> torch.Tensor:
if get_track_meta():
self.update_meta(img, orig_size, sp_size)
self.push_transform(
img,
orig_size=orig_size,
extra_info={
"mode": mode,
"align_corners": align_corners if align_corners is not None else TraceKeys.NONE,
"new_dim": len(orig_size) - ndim, # additional dims appended
},
)
return img
def update_meta(self, img, spatial_size, new_spatial_size):
affine = convert_to_tensor(img.affine, track_meta=False)
img.affine = scale_affine(affine, spatial_size, new_spatial_size)
def inverse(self, data: torch.Tensor) -> torch.Tensor:
transform = self.pop_transform(data)
return self.inverse_transform(data, transform)
def inverse_transform(self, data: torch.Tensor, transform) -> torch.Tensor:
orig_size = transform[TraceKeys.ORIG_SIZE]
mode = transform[TraceKeys.EXTRA_INFO]["mode"]
align_corners = transform[TraceKeys.EXTRA_INFO]["align_corners"]
xform = Resize(
spatial_size=orig_size, mode=mode, align_corners=None if align_corners == TraceKeys.NONE else align_corners
)
with xform.trace_transform(False):
data = xform(data)
for _ in range(transform[TraceKeys.EXTRA_INFO]["new_dim"]):
data = data.squeeze(-1) # remove the additional dims
return data
class Rotate(InvertibleTransform):
"""
Rotates an input image by given angle using :py:class:`monai.networks.layers.AffineTransform`.
Args:
        angle: Rotation angle(s) in radians. Should be a float for 2D, or a tuple of three floats for 3D.
keep_size: If it is True, the output shape is kept the same as the input.
If it is False, the output shape is adapted so that the
input array is contained completely in the output. Default is True.
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
align_corners: Defaults to False.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
dtype: data type for resampling computation. Defaults to ``float32``.
If None, use the data type of input data. To be compatible with other modules,
the output data type is always ``float32``.
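    Example (a minimal usage sketch; the input shape and angle below are
    illustrative assumptions, not defaults of this class)::

        import numpy as np
        import torch
        img = torch.rand(1, 64, 64)  # channel-first 2D image
        rotated = Rotate(angle=np.pi / 4, keep_size=True)(img)
        # rotated.shape == (1, 64, 64) because keep_size=True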
"""
backend = [TransformBackends.TORCH]
def __init__(
self,
angle: Union[Sequence[float], float],
keep_size: bool = True,
mode: str = GridSampleMode.BILINEAR,
padding_mode: str = GridSamplePadMode.BORDER,
align_corners: bool = False,
dtype: Union[DtypeLike, torch.dtype] = torch.float32,
) -> None:
self.angle = angle
self.keep_size = keep_size
self.mode: str = look_up_option(mode, GridSampleMode)
self.padding_mode: str = look_up_option(padding_mode, GridSamplePadMode)
self.align_corners = align_corners
self.dtype = dtype
def __call__(
self,
img: torch.Tensor,
mode: Optional[str] = None,
padding_mode: Optional[str] = None,
align_corners: Optional[bool] = None,
dtype: Union[DtypeLike, torch.dtype] = None,
) -> torch.Tensor:
"""
Args:
img: channel first array, must have shape: [chns, H, W] or [chns, H, W, D].
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``self.padding_mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
align_corners: Defaults to ``self.align_corners``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
dtype: data type for resampling computation. Defaults to ``self.dtype``.
If None, use the data type of input data. To be compatible with other modules,
the output data type is always ``float32``.
Raises:
ValueError: When ``img`` spatially is not one of [2D, 3D].
"""
img = convert_to_tensor(img, track_meta=get_track_meta())
_dtype = get_equivalent_dtype(dtype or self.dtype or img.dtype, torch.Tensor)
im_shape = np.asarray(img.shape[1:]) # spatial dimensions
input_ndim = len(im_shape)
if input_ndim not in (2, 3):
raise ValueError(f"Unsupported image dimension: {input_ndim}, available options are [2, 3].")
_angle = ensure_tuple_rep(self.angle, 1 if input_ndim == 2 else 3)
transform = create_rotate(input_ndim, _angle)
shift = create_translate(input_ndim, ((im_shape - 1) / 2).tolist())
if self.keep_size:
output_shape = im_shape
else:
corners = np.asarray(np.meshgrid(*[(0, dim) for dim in im_shape], indexing="ij")).reshape(
(len(im_shape), -1)
)
corners = transform[:-1, :-1] @ corners # type: ignore
output_shape = np.asarray(corners.ptp(axis=1) + 0.5, dtype=int)
shift_1 = create_translate(input_ndim, (-(output_shape - 1) / 2).tolist())
transform = shift @ transform @ shift_1
img_t = img.to(_dtype)
transform_t, *_ = convert_to_dst_type(transform, img_t)
_mode = look_up_option(mode or self.mode, GridSampleMode)
_padding_mode = look_up_option(padding_mode or self.padding_mode, GridSamplePadMode)
_align_corners = self.align_corners if align_corners is None else align_corners
xform = AffineTransform(
normalized=False,
mode=_mode,
padding_mode=_padding_mode,
align_corners=_align_corners,
reverse_indexing=True,
)
output: torch.Tensor = xform(img_t.unsqueeze(0), transform_t, spatial_size=output_shape).float().squeeze(0)
out, *_ = convert_to_dst_type(output, dst=img, dtype=output.dtype)
if get_track_meta():
self.update_meta(out, transform_t)
self.push_transform(
out,
orig_size=img_t.shape[1:],
extra_info={
"rot_mat": transform,
"mode": _mode,
"padding_mode": _padding_mode,
"align_corners": _align_corners if _align_corners is not None else TraceKeys.NONE,
"dtype": str(_dtype)[6:], # dtype as string; remove "torch": torch.float32 -> float32
},
)
return out
def update_meta(self, img, rotate_mat):
affine = convert_to_tensor(img.affine, track_meta=False)
mat = to_affine_nd(len(affine) - 1, rotate_mat)
img.affine = affine @ convert_to_dst_type(mat, affine)[0]
def inverse(self, data: torch.Tensor) -> torch.Tensor:
transform = self.pop_transform(data)
return self.inverse_transform(data, transform)
def inverse_transform(self, data: torch.Tensor, transform) -> torch.Tensor:
fwd_rot_mat = transform[TraceKeys.EXTRA_INFO]["rot_mat"]
mode = transform[TraceKeys.EXTRA_INFO]["mode"]
padding_mode = transform[TraceKeys.EXTRA_INFO]["padding_mode"]
align_corners = transform[TraceKeys.EXTRA_INFO]["align_corners"]
dtype = transform[TraceKeys.EXTRA_INFO]["dtype"]
inv_rot_mat = linalg_inv(fwd_rot_mat)
xform = AffineTransform(
normalized=False,
mode=mode,
padding_mode=padding_mode,
align_corners=False if align_corners == TraceKeys.NONE else align_corners,
reverse_indexing=True,
)
img_t: torch.Tensor = convert_data_type(data, MetaTensor, dtype=dtype)[0]
transform_t, *_ = convert_to_dst_type(inv_rot_mat, img_t)
sp_size = transform[TraceKeys.ORIG_SIZE]
out: torch.Tensor = xform(img_t.unsqueeze(0), transform_t, spatial_size=sp_size).float().squeeze(0)
out = convert_to_dst_type(out, dst=data, dtype=out.dtype)[0]
if isinstance(data, MetaTensor):
self.update_meta(out, transform_t)
return out
class Zoom(InvertibleTransform):
"""
Zooms an ND image using :py:class:`torch.nn.functional.interpolate`.
For details, please see https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html.
Different from :py:class:`monai.transforms.resize`, this transform takes scaling factors
as input, and provides an option of preserving the input spatial size.
Args:
zoom: The zoom factor along the spatial axes.
If a float, zoom is the same for each spatial axis.
If a sequence, zoom should contain one value for each spatial axis.
mode: {``"nearest"``, ``"nearest-exact"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
The interpolation mode. Defaults to ``"area"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
padding_mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
One of the listed string values or a user supplied function. Defaults to ``"edge"``.
The mode to pad data after zooming.
See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
align_corners: This only has an effect when mode is
'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
keep_size: Should keep original size (padding/slicing if needed), default is True.
kwargs: other arguments for the `np.pad` or `torch.pad` function.
note that `np.pad` treats channel dimension as the first dimension.
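    Example (an illustrative usage sketch with assumed shape and zoom factor)::

        import torch
        img = torch.rand(1, 64, 64)
        zoomed = Zoom(zoom=0.5, keep_size=True)(img)
        # the spatial size stays (64, 64); the zoomed-out content is padded
        # back to the original size using the default "edge" padding mode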
"""
backend = [TransformBackends.TORCH]
def __init__(
self,
zoom: Union[Sequence[float], float],
mode: str = InterpolateMode.AREA,
padding_mode: str = NumpyPadMode.EDGE,
align_corners: Optional[bool] = None,
keep_size: bool = True,
**kwargs,
) -> None:
self.zoom = zoom
self.mode: InterpolateMode = InterpolateMode(mode)
self.padding_mode = padding_mode
self.align_corners = align_corners
self.keep_size = keep_size
self.kwargs = kwargs
def __call__(
self,
img: torch.Tensor,
mode: Optional[str] = None,
padding_mode: Optional[str] = None,
align_corners: Optional[bool] = None,
) -> torch.Tensor:
"""
Args:
img: channel first array, must have shape: (num_channels, H[, W, ..., ]).
mode: {``"nearest"``, ``"nearest-exact"``, ``"linear"``,
``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
The interpolation mode. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
padding_mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
One of the listed string values or a user supplied function. Defaults to ``"edge"``.
The mode to pad data after zooming.
See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
align_corners: This only has an effect when mode is
'linear', 'bilinear', 'bicubic' or 'trilinear'. Defaults to ``self.align_corners``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
"""
img = convert_to_tensor(img, track_meta=get_track_meta())
img_t = img.to(torch.float32)
_zoom = ensure_tuple_rep(self.zoom, img.ndim - 1) # match the spatial image dim
_mode = look_up_option(self.mode if mode is None else mode, InterpolateMode).value
_align_corners = self.align_corners if align_corners is None else align_corners
_padding_mode = padding_mode or self.padding_mode
zoomed: NdarrayOrTensor = torch.nn.functional.interpolate(
recompute_scale_factor=True,
input=img_t.unsqueeze(0),
scale_factor=list(_zoom),
mode=_mode,
align_corners=_align_corners,
)
zoomed = zoomed.squeeze(0)
orig_size, z_size = img_t.shape, zoomed.shape
out, *_ = convert_to_dst_type(zoomed, dst=img)
if get_track_meta():
self.update_meta(out, orig_size[1:], z_size[1:])
do_pad_crop = self.keep_size and not np.allclose(orig_size, z_size)
if do_pad_crop:
_pad_crop = ResizeWithPadOrCrop(spatial_size=img_t.shape[1:], mode=_padding_mode)
out = _pad_crop(out)
if get_track_meta():
padcrop_xform = self.pop_transform(out, check=False) if do_pad_crop else {}
self.push_transform(
out,
orig_size=orig_size[1:],
extra_info={
"mode": _mode,
"align_corners": _align_corners if _align_corners is not None else TraceKeys.NONE,
"do_padcrop": do_pad_crop,
"padcrop": padcrop_xform,
},
)
return out
def update_meta(self, img, spatial_size, new_spatial_size):
affine = convert_to_tensor(img.affine, track_meta=False)
img.affine = scale_affine(affine, spatial_size, new_spatial_size)
def inverse(self, data: torch.Tensor) -> torch.Tensor:
transform = self.pop_transform(data)
return self.inverse_transform(data, transform)
def inverse_transform(self, data: torch.Tensor, transform) -> torch.Tensor:
if transform[TraceKeys.EXTRA_INFO]["do_padcrop"]:
orig_size = transform[TraceKeys.ORIG_SIZE]
pad_or_crop = ResizeWithPadOrCrop(spatial_size=orig_size, mode="edge")
padcrop_xform = transform[TraceKeys.EXTRA_INFO]["padcrop"]
padcrop_xform[TraceKeys.EXTRA_INFO]["pad_info"][TraceKeys.ID] = TraceKeys.NONE
padcrop_xform[TraceKeys.EXTRA_INFO]["crop_info"][TraceKeys.ID] = TraceKeys.NONE
# this uses inverse because spatial_size // 2 in the forward pass of center crop may cause issues
data = pad_or_crop.inverse_transform(data, padcrop_xform) # type: ignore
# Create inverse transform
mode = transform[TraceKeys.EXTRA_INFO]["mode"]
align_corners = transform[TraceKeys.EXTRA_INFO]["align_corners"]
inverse_transform = Resize(spatial_size=transform[TraceKeys.ORIG_SIZE])
# Apply inverse
with inverse_transform.trace_transform(False):
out = inverse_transform(
data, mode=mode, align_corners=None if align_corners == TraceKeys.NONE else align_corners
)
return out
class Rotate90(InvertibleTransform):
"""
    Rotate an array by 90 degrees in the plane specified by `spatial_axes`.
See `torch.rot90` for additional details:
https://pytorch.org/docs/stable/generated/torch.rot90.html#torch-rot90.
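    Example (usage sketch; the input shape is an illustrative assumption)::

        import torch
        img = torch.rand(1, 32, 64)  # channel-first
        out = Rotate90(k=1, spatial_axes=(0, 1))(img)
        # out.shape == (1, 64, 32): the two spatial axes are swapped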
"""
backend = [TransformBackends.TORCH]
def __init__(self, k: int = 1, spatial_axes: Tuple[int, int] = (0, 1)) -> None:
"""
Args:
k: number of times to rotate by 90 degrees.
            spatial_axes: 2 int numbers, defining the plane to rotate in, given as 2 spatial axes.
                Default: (0, 1), which corresponds to the first two axes of the spatial dimensions.
                If an axis is negative, it counts from the last to the first axis.
"""
self.k = k
spatial_axes_: Tuple[int, int] = ensure_tuple(spatial_axes) # type: ignore
if len(spatial_axes_) != 2:
raise ValueError("spatial_axes must be 2 int numbers to indicate the axes to rotate 90 degrees.")
self.spatial_axes = spatial_axes_
def __call__(self, img: torch.Tensor) -> torch.Tensor:
"""
Args:
img: channel first array, must have shape: (num_channels, H[, W, ..., ]),
"""
img = convert_to_tensor(img, track_meta=get_track_meta())
axes = map_spatial_axes(img.ndim, self.spatial_axes)
ori_shape = img.shape[1:]
out: NdarrayOrTensor = torch.rot90(img, self.k, axes)
out = convert_to_dst_type(out, img)[0]
if get_track_meta():
self.update_meta(out, ori_shape, out.shape[1:], axes, self.k)
self.push_transform(out, extra_info={"axes": [d - 1 for d in axes], "k": self.k}) # compensate spatial dim
return out
def update_meta(self, img, spatial_size, new_spatial_size, axes, k):
affine = convert_data_type(img.affine, torch.Tensor)[0]
r, sp_r = len(affine) - 1, len(spatial_size)
mat = to_affine_nd(r, create_translate(sp_r, [-float(d - 1) / 2 for d in new_spatial_size]))
s = -1.0 if int(axes[0]) - int(axes[1]) in (-1, 2) else 1.0
if sp_r == 2:
rot90 = to_affine_nd(r, create_rotate(sp_r, [s * np.pi / 2]))
else:
idx = {1, 2, 3} - set(axes)
angle = [0, 0, 0]
angle[idx.pop() - 1] = s * np.pi / 2
rot90 = to_affine_nd(r, create_rotate(sp_r, angle))
for _ in range(k):
mat = rot90 @ mat
mat = to_affine_nd(r, create_translate(sp_r, [float(d - 1) / 2 for d in spatial_size])) @ mat
img.affine = affine @ convert_to_dst_type(mat, affine)[0]
def inverse(self, data: torch.Tensor) -> torch.Tensor:
transform = self.pop_transform(data)
return self.inverse_transform(data, transform)
def inverse_transform(self, data: torch.Tensor, transform) -> torch.Tensor:
axes = transform[TraceKeys.EXTRA_INFO]["axes"]
k = transform[TraceKeys.EXTRA_INFO]["k"]
inv_k = 4 - k % 4
xform = Rotate90(k=inv_k, spatial_axes=axes)
with xform.trace_transform(False):
return xform(data)
class RandRotate90(RandomizableTransform, InvertibleTransform):
"""
With probability `prob`, input arrays are rotated by 90 degrees
in the plane specified by `spatial_axes`.
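    Example (usage sketch; the probability and seed are illustrative assumptions)::

        import torch
        rand_rot90 = RandRotate90(prob=0.5, max_k=3)
        rand_rot90.set_random_state(seed=0)  # for reproducibility
        out = rand_rot90(torch.rand(1, 32, 32))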
"""
backend = Rotate90.backend
def __init__(self, prob: float = 0.1, max_k: int = 3, spatial_axes: Tuple[int, int] = (0, 1)) -> None:
"""
Args:
prob: probability of rotating.
(Default 0.1, with 10% probability it returns a rotated array)
max_k: number of rotations will be sampled from `np.random.randint(max_k) + 1`, (Default 3).
            spatial_axes: 2 int numbers, defining the plane to rotate in, given as 2 spatial axes.
                Default: (0, 1), which corresponds to the first two axes of the spatial dimensions.
"""
RandomizableTransform.__init__(self, prob)
self.max_k = max_k
self.spatial_axes = spatial_axes
self._rand_k = 0
def randomize(self, data: Optional[Any] = None) -> None:
super().randomize(None)
if not self._do_transform:
return None
self._rand_k = self.R.randint(self.max_k) + 1
def __call__(self, img: torch.Tensor, randomize: bool = True) -> torch.Tensor:
"""
Args:
img: channel first array, must have shape: (num_channels, H[, W, ..., ]),
randomize: whether to execute `randomize()` function first, default to True.
"""
if randomize:
self.randomize()
if self._do_transform:
out = Rotate90(self._rand_k, self.spatial_axes)(img)
else:
out = convert_to_tensor(img, track_meta=get_track_meta())
if get_track_meta():
maybe_rot90_info = self.pop_transform(out, check=False) if self._do_transform else {}
self.push_transform(out, extra_info=maybe_rot90_info)
return out
def inverse(self, data: torch.Tensor) -> torch.Tensor:
xform_info = self.pop_transform(data)
if not xform_info[TraceKeys.DO_TRANSFORM]:
return data
rotate_xform = xform_info[TraceKeys.EXTRA_INFO]
return Rotate90().inverse_transform(data, rotate_xform)
class RandRotate(RandomizableTransform, InvertibleTransform):
"""
Randomly rotate the input arrays.
Args:
range_x: Range of rotation angle in radians in the plane defined by the first and second axes.
If single number, angle is uniformly sampled from (-range_x, range_x).
        range_y: Range of rotation angle in radians in the plane defined by the first and third axes.
            If single number, angle is uniformly sampled from (-range_y, range_y). Only works for 3D data.
        range_z: Range of rotation angle in radians in the plane defined by the second and third axes.
            If single number, angle is uniformly sampled from (-range_z, range_z). Only works for 3D data.
prob: Probability of rotation.
keep_size: If it is False, the output shape is adapted so that the
input array is contained completely in the output.
If it is True, the output shape is the same as the input. Default is True.
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
align_corners: Defaults to False.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
dtype: data type for resampling computation. Defaults to ``float32``.
If None, use the data type of input data. To be compatible with other modules,
the output data type is always ``float32``.
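    Example (usage sketch; the rotation range and seed are assumed values)::

        import numpy as np
        import torch
        rand_rotate = RandRotate(range_x=np.pi / 12, prob=1.0, keep_size=True)
        rand_rotate.set_random_state(seed=0)
        out = rand_rotate(torch.rand(1, 64, 64))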
"""
backend = Rotate.backend
def __init__(
self,
range_x: Union[Tuple[float, float], float] = 0.0,
range_y: Union[Tuple[float, float], float] = 0.0,
range_z: Union[Tuple[float, float], float] = 0.0,
prob: float = 0.1,
keep_size: bool = True,
mode: str = GridSampleMode.BILINEAR,
padding_mode: str = GridSamplePadMode.BORDER,
align_corners: bool = False,
dtype: Union[DtypeLike, torch.dtype] = np.float32,
) -> None:
RandomizableTransform.__init__(self, prob)
self.range_x = ensure_tuple(range_x)
if len(self.range_x) == 1:
self.range_x = tuple(sorted([-self.range_x[0], self.range_x[0]]))
self.range_y = ensure_tuple(range_y)
if len(self.range_y) == 1:
self.range_y = tuple(sorted([-self.range_y[0], self.range_y[0]]))
self.range_z = ensure_tuple(range_z)
if len(self.range_z) == 1:
self.range_z = tuple(sorted([-self.range_z[0], self.range_z[0]]))
self.keep_size = keep_size
self.mode: str = look_up_option(mode, GridSampleMode)
self.padding_mode: str = look_up_option(padding_mode, GridSamplePadMode)
self.align_corners = align_corners
self.dtype = dtype
self.x = 0.0
self.y = 0.0
self.z = 0.0
def randomize(self, data: Optional[Any] = None) -> None:
super().randomize(None)
if not self._do_transform:
return None
self.x = self.R.uniform(low=self.range_x[0], high=self.range_x[1])
self.y = self.R.uniform(low=self.range_y[0], high=self.range_y[1])
self.z = self.R.uniform(low=self.range_z[0], high=self.range_z[1])
@deprecated_arg(name="get_matrix", since="0.9", msg_suffix="please use `img.meta` instead.")
def __call__(
self,
img: torch.Tensor,
mode: Optional[str] = None,
padding_mode: Optional[str] = None,
align_corners: Optional[bool] = None,
dtype: Union[DtypeLike, torch.dtype] = None,
randomize: bool = True,
get_matrix: bool = False,
):
"""
Args:
img: channel first array, must have shape 2D: (nchannels, H, W), or 3D: (nchannels, H, W, D).
mode: {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``self.padding_mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
align_corners: Defaults to ``self.align_corners``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
dtype: data type for resampling computation. Defaults to ``self.dtype``.
If None, use the data type of input data. To be compatible with other modules,
the output data type is always ``float32``.
randomize: whether to execute `randomize()` function first, default to True.
"""
if randomize:
self.randomize()
if self._do_transform:
rotator = Rotate(
angle=self.x if img.ndim == 3 else (self.x, self.y, self.z),
keep_size=self.keep_size,
mode=look_up_option(mode or self.mode, GridSampleMode),
padding_mode=look_up_option(padding_mode or self.padding_mode, GridSamplePadMode),
align_corners=self.align_corners if align_corners is None else align_corners,
dtype=dtype or self.dtype or img.dtype,
)
out = rotator(img)
else:
out = convert_to_tensor(img, track_meta=get_track_meta(), dtype=torch.float32)
if get_track_meta():
rot_info = self.pop_transform(out, check=False) if self._do_transform else {}
self.push_transform(out, extra_info=rot_info)
return out
def inverse(self, data: torch.Tensor) -> torch.Tensor:
xform_info = self.pop_transform(data)
if not xform_info[TraceKeys.DO_TRANSFORM]:
return data
return Rotate(0).inverse_transform(data, xform_info[TraceKeys.EXTRA_INFO])
class RandFlip(RandomizableTransform, InvertibleTransform):
"""
Randomly flips the image along axes. Preserves shape.
See numpy.flip for additional details.
https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html
Args:
prob: Probability of flipping.
spatial_axis: Spatial axes along which to flip over. Default is None.
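    Example (usage sketch; parameter values are illustrative)::

        import torch
        rand_flip = RandFlip(prob=0.5, spatial_axis=0)
        rand_flip.set_random_state(seed=0)
        out = rand_flip(torch.rand(1, 32, 32))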
"""
backend = Flip.backend
def __init__(self, prob: float = 0.1, spatial_axis: Optional[Union[Sequence[int], int]] = None) -> None:
RandomizableTransform.__init__(self, prob)
self.flipper = Flip(spatial_axis=spatial_axis)
def __call__(self, img: torch.Tensor, randomize: bool = True) -> torch.Tensor:
"""
Args:
img: channel first array, must have shape: (num_channels, H[, W, ..., ]),
randomize: whether to execute `randomize()` function first, default to True.
"""
if randomize:
self.randomize(None)
out = self.flipper(img) if self._do_transform else img
out = convert_to_tensor(out, track_meta=get_track_meta())
if get_track_meta():
xform_info = self.pop_transform(out, check=False) if self._do_transform else {}
self.push_transform(out, extra_info=xform_info)
return out
def inverse(self, data: torch.Tensor) -> torch.Tensor:
transform = self.pop_transform(data)
if not transform[TraceKeys.DO_TRANSFORM]:
return data
data.applied_operations.append(transform[TraceKeys.EXTRA_INFO]) # type: ignore
return self.flipper.inverse(data)
class RandAxisFlip(RandomizableTransform, InvertibleTransform):
"""
Randomly select a spatial axis and flip along it.
See numpy.flip for additional details.
https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html
Args:
prob: Probability of flipping.
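    Example (usage sketch; shape and seed are illustrative assumptions)::

        import torch
        axis_flip = RandAxisFlip(prob=1.0)
        axis_flip.set_random_state(seed=0)
        out = axis_flip(torch.rand(1, 32, 32, 16))  # flips along a randomly chosen spatial axis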
"""
backend = Flip.backend
def __init__(self, prob: float = 0.1) -> None:
RandomizableTransform.__init__(self, prob)
self._axis: Optional[int] = None
self.flipper = Flip(spatial_axis=self._axis)
def randomize(self, data: NdarrayOrTensor) -> None:
super().randomize(None)
if not self._do_transform:
return None
self._axis = self.R.randint(data.ndim - 1)
def __call__(self, img: torch.Tensor, randomize: bool = True) -> torch.Tensor:
"""
Args:
img: channel first array, must have shape: (num_channels, H[, W, ..., ])
randomize: whether to execute `randomize()` function first, default to True.
"""
if randomize:
self.randomize(data=img)
if self._do_transform:
self.flipper.spatial_axis = self._axis
out = self.flipper(img)
else:
out = convert_to_tensor(img, track_meta=get_track_meta())
if get_track_meta():
xform = self.pop_transform(out, check=False) if self._do_transform else {}
xform["axes"] = self._axis
self.push_transform(out, extra_info=xform)
return out
def inverse(self, data: torch.Tensor) -> torch.Tensor:
transform = self.pop_transform(data)
if not transform[TraceKeys.DO_TRANSFORM]:
return data
flipper = Flip(spatial_axis=transform[TraceKeys.EXTRA_INFO]["axes"])
with flipper.trace_transform(False):
return flipper(data)
class RandZoom(RandomizableTransform, InvertibleTransform):
"""
Randomly zooms input arrays with given probability within given zoom range.
Args:
prob: Probability of zooming.
min_zoom: Min zoom factor. Can be float or sequence same size as image.
If a float, select a random factor from `[min_zoom, max_zoom]` then apply to all spatial dims
to keep the original spatial shape ratio.
If a sequence, min_zoom should contain one value for each spatial axis.
If 2 values provided for 3D data, use the first value for both H & W dims to keep the same zoom ratio.
max_zoom: Max zoom factor. Can be float or sequence same size as image.
If a float, select a random factor from `[min_zoom, max_zoom]` then apply to all spatial dims
to keep the original spatial shape ratio.
If a sequence, max_zoom should contain one value for each spatial axis.
If 2 values provided for 3D data, use the first value for both H & W dims to keep the same zoom ratio.
mode: {``"nearest"``, ``"nearest-exact"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
The interpolation mode. Defaults to ``"area"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
padding_mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
One of the listed string values or a user supplied function. Defaults to ``"constant"``.
The mode to pad data after zooming.
See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
align_corners: This only has an effect when mode is
'linear', 'bilinear', 'bicubic' or 'trilinear'. Default: None.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
keep_size: Should keep original size (pad if needed), default is True.
kwargs: other arguments for the `np.pad` or `torch.pad` function.
note that `np.pad` treats channel dimension as the first dimension.
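    Example (usage sketch; the zoom range below is an arbitrary assumption)::

        import torch
        rand_zoom = RandZoom(prob=1.0, min_zoom=0.8, max_zoom=1.2, keep_size=True)
        rand_zoom.set_random_state(seed=0)
        out = rand_zoom(torch.rand(1, 64, 64))
        # out.shape == (1, 64, 64) because keep_size=True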
"""
backend = Zoom.backend
def __init__(
self,
prob: float = 0.1,
min_zoom: Union[Sequence[float], float] = 0.9,
max_zoom: Union[Sequence[float], float] = 1.1,
mode: str = InterpolateMode.AREA,
padding_mode: str = NumpyPadMode.EDGE,
align_corners: Optional[bool] = None,
keep_size: bool = True,
**kwargs,
) -> None:
RandomizableTransform.__init__(self, prob)
self.min_zoom = ensure_tuple(min_zoom)
self.max_zoom = ensure_tuple(max_zoom)
if len(self.min_zoom) != len(self.max_zoom):
raise AssertionError("min_zoom and max_zoom must have same length.")
self.mode: InterpolateMode = look_up_option(mode, InterpolateMode)
self.padding_mode = padding_mode
self.align_corners = align_corners
self.keep_size = keep_size
self.kwargs = kwargs
self._zoom: Sequence[float] = [1.0]
def randomize(self, img: NdarrayOrTensor) -> None:
super().randomize(None)
if not self._do_transform:
return None
self._zoom = [self.R.uniform(l, h) for l, h in zip(self.min_zoom, self.max_zoom)]
if len(self._zoom) == 1:
# to keep the spatial shape ratio, use same random zoom factor for all dims
self._zoom = ensure_tuple_rep(self._zoom[0], img.ndim - 1)
elif len(self._zoom) == 2 and img.ndim > 3:
# if 2 zoom factors provided for 3D data, use the first factor for H and W dims, second factor for D dim
self._zoom = ensure_tuple_rep(self._zoom[0], img.ndim - 2) + ensure_tuple(self._zoom[-1])
def __call__(
self,
img: torch.Tensor,
mode: Optional[str] = None,
padding_mode: Optional[str] = None,
align_corners: Optional[bool] = None,
randomize: bool = True,
) -> torch.Tensor:
"""
Args:
img: channel first array, must have shape 2D: (nchannels, H, W), or 3D: (nchannels, H, W, D).
mode: {``"nearest"``, ``"nearest-exact"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``,
``"area"``}, the interpolation mode. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
padding_mode: available modes for numpy array:{``"constant"``, ``"edge"``, ``"linear_ramp"``, ``"maximum"``,
``"mean"``, ``"median"``, ``"minimum"``, ``"reflect"``, ``"symmetric"``, ``"wrap"``, ``"empty"``}
available modes for PyTorch Tensor: {``"constant"``, ``"reflect"``, ``"replicate"``, ``"circular"``}.
One of the listed string values or a user supplied function. Defaults to ``"constant"``.
The mode to pad data after zooming.
See also: https://numpy.org/doc/1.18/reference/generated/numpy.pad.html
https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html
align_corners: This only has an effect when mode is
'linear', 'bilinear', 'bicubic' or 'trilinear'. Defaults to ``self.align_corners``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.interpolate.html
randomize: whether to execute `randomize()` function first, default to True.
"""
# match the spatial image dim
if randomize:
self.randomize(img=img)
if not self._do_transform:
out = convert_to_tensor(img, track_meta=get_track_meta(), dtype=torch.float32)
else:
out = Zoom(
self._zoom,
keep_size=self.keep_size,
mode=look_up_option(mode or self.mode, InterpolateMode),
padding_mode=padding_mode or self.padding_mode,
align_corners=self.align_corners if align_corners is None else align_corners,
**self.kwargs,
)(img)
if get_track_meta():
z_info = self.pop_transform(out, check=False) if self._do_transform else {}
self.push_transform(out, extra_info=z_info)
return out # type: ignore
def inverse(self, data: torch.Tensor) -> torch.Tensor:
xform_info = self.pop_transform(data)
if not xform_info[TraceKeys.DO_TRANSFORM]:
return data
return Zoom(self._zoom).inverse_transform(data, xform_info[TraceKeys.EXTRA_INFO])
class AffineGrid(Transform):
"""
Affine transforms on the coordinates.
Args:
rotate_params: a rotation angle in radians, a scalar for 2D image, a tuple of 3 floats for 3D.
Defaults to no rotation.
shear_params: shearing factors for affine matrix, take a 3D affine as example::
[
[1.0, params[0], params[1], 0.0],
[params[2], 1.0, params[3], 0.0],
[params[4], params[5], 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
a tuple of 2 floats for 2D, a tuple of 6 floats for 3D. Defaults to no shearing.
translate_params: a tuple of 2 floats for 2D, a tuple of 3 floats for 3D. Translation is in
pixel/voxel relative to the center of the input image. Defaults to no translation.
scale_params: scale factor for every spatial dims. a tuple of 2 floats for 2D,
a tuple of 3 floats for 3D. Defaults to `1.0`.
dtype: data type for the grid computation. Defaults to ``float32``.
If ``None``, use the data type of input data (if `grid` is provided).
device: device on which the tensor will be allocated, if a new grid is generated.
affine: If applied, ignore the params (`rotate_params`, etc.) and use the
supplied matrix. Should be square with each side = num of image spatial
dimensions + 1.
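    Example (a minimal sketch of grid generation; the spatial size and parameter
    values are assumptions for illustration)::

        affine_grid = AffineGrid(rotate_params=0.3, scale_params=(1.2, 1.2))
        grid, affine_mat = affine_grid(spatial_size=(64, 64))
        # grid has shape (3, 64, 64) (homogeneous coordinates for a 2D image)
        # and affine_mat is the corresponding 3x3 affine matrix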
"""
backend = [TransformBackends.TORCH]
def __init__(
self,
rotate_params: Optional[Union[Sequence[float], float]] = None,
shear_params: Optional[Union[Sequence[float], float]] = None,
translate_params: Optional[Union[Sequence[float], float]] = None,
scale_params: Optional[Union[Sequence[float], float]] = None,
device: Optional[torch.device] = None,
dtype: DtypeLike = np.float32,
affine: Optional[NdarrayOrTensor] = None,
) -> None:
self.rotate_params = rotate_params
self.shear_params = shear_params
self.translate_params = translate_params
self.scale_params = scale_params
self.device = device
self.dtype = dtype
self.affine = affine
def __call__(
self, spatial_size: Optional[Sequence[int]] = None, grid: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
The grid can be initialized with a `spatial_size` parameter, or provided directly as `grid`.
Therefore, either `spatial_size` or `grid` must be provided.
When initialising from `spatial_size`, the backend "torch" will be used.
Args:
spatial_size: output grid size.
grid: grid to be transformed. Shape must be (3, H, W) for 2D or (4, H, W, D) for 3D.
Raises:
ValueError: When ``grid=None`` and ``spatial_size=None``. Incompatible values.
"""
if grid is None: # create grid from spatial_size
if spatial_size is None:
raise ValueError("Incompatible values: grid=None and spatial_size=None.")
grid_ = create_grid(spatial_size, device=self.device, backend="torch", dtype=self.dtype)
else:
grid_ = grid
_dtype = self.dtype or grid_.dtype
grid_: torch.Tensor = convert_to_tensor(grid_, dtype=_dtype, track_meta=get_track_meta()) # type: ignore
_b = TransformBackends.TORCH
_device = grid_.device # type: ignore
affine: NdarrayOrTensor
if self.affine is None:
spatial_dims = len(grid_.shape) - 1
affine = torch.eye(spatial_dims + 1, device=_device)
if self.rotate_params:
affine = affine @ create_rotate(spatial_dims, self.rotate_params, device=_device, backend=_b)
if self.shear_params:
affine = affine @ create_shear(spatial_dims, self.shear_params, device=_device, backend=_b)
if self.translate_params:
affine = affine @ create_translate(spatial_dims, self.translate_params, device=_device, backend=_b)
if self.scale_params:
affine = affine @ create_scale(spatial_dims, self.scale_params, device=_device, backend=_b)
else:
affine = self.affine
affine = to_affine_nd(len(grid_) - 1, affine)
affine = convert_to_tensor(affine, device=grid_.device, dtype=grid_.dtype, track_meta=False) # type: ignore
grid_ = (affine @ grid_.reshape((grid_.shape[0], -1))).reshape([-1] + list(grid_.shape[1:]))
return grid_, affine # type: ignore
class RandAffineGrid(Randomizable, Transform):
"""
Generate randomised affine grid.
"""
backend = AffineGrid.backend
def __init__(
self,
rotate_range: RandRange = None,
shear_range: RandRange = None,
translate_range: RandRange = None,
scale_range: RandRange = None,
device: Optional[torch.device] = None,
) -> None:
"""
Args:
rotate_range: angle range in radians. If element `i` is a pair of (min, max) values, then
`uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter
for the `i`th spatial dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used.
This can be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be
in range `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]`
for dim0 and nothing for the remaining dimensions.
shear_range: shear range with format matching `rotate_range`, it defines the range to randomly select
shearing factors(a tuple of 2 floats for 2D, a tuple of 6 floats for 3D) for affine matrix,
take a 3D affine as example::
[
[1.0, params[0], params[1], 0.0],
[params[2], 1.0, params[3], 0.0],
[params[4], params[5], 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
            translate_range: translate range with format matching `rotate_range`. It defines the range to randomly
                select the voxel offsets to translate for every spatial dim.
            scale_range: scaling range with format matching `rotate_range`. It defines the range to randomly select
                a scale factor for every spatial dim. A value of 1.0 is added to the result.
This allows 0 to correspond to no change (i.e., a scaling of 1.0).
device: device to store the output grid data.
See also:
- :py:meth:`monai.transforms.utils.create_rotate`
- :py:meth:`monai.transforms.utils.create_shear`
- :py:meth:`monai.transforms.utils.create_translate`
- :py:meth:`monai.transforms.utils.create_scale`
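        Example (usage sketch; the ranges and seed below are illustrative assumptions)::

            rand_grid = RandAffineGrid(rotate_range=0.3, scale_range=0.1)
            rand_grid.set_random_state(seed=0)
            grid = rand_grid(spatial_size=(32, 32))  # a (3, 32, 32) homogeneous grid
            matrix = rand_grid.get_transformation_matrix()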
"""
self.rotate_range = ensure_tuple(rotate_range)
self.shear_range = ensure_tuple(shear_range)
self.translate_range = ensure_tuple(translate_range)
self.scale_range = ensure_tuple(scale_range)
self.rotate_params: Optional[List[float]] = None
self.shear_params: Optional[List[float]] = None
self.translate_params: Optional[List[float]] = None
self.scale_params: Optional[List[float]] = None
self.device = device
self.affine: Optional[torch.Tensor] = torch.eye(4, dtype=torch.float64)
def _get_rand_param(self, param_range, add_scalar: float = 0.0):
out_param = []
for f in param_range:
if issequenceiterable(f):
if len(f) != 2:
raise ValueError("If giving range as [min,max], should only have two elements per dim.")
out_param.append(self.R.uniform(f[0], f[1]) + add_scalar)
elif f is not None:
out_param.append(self.R.uniform(-f, f) + add_scalar)
return out_param
def randomize(self, data: Optional[Any] = None) -> None:
self.rotate_params = self._get_rand_param(self.rotate_range)
self.shear_params = self._get_rand_param(self.shear_range)
self.translate_params = self._get_rand_param(self.translate_range)
self.scale_params = self._get_rand_param(self.scale_range, 1.0)
def __call__(
self,
spatial_size: Optional[Sequence[int]] = None,
grid: Optional[NdarrayOrTensor] = None,
randomize: bool = True,
) -> torch.Tensor:
"""
Args:
spatial_size: output grid size.
grid: grid to be transformed. Shape must be (3, H, W) for 2D or (4, H, W, D) for 3D.
            randomize: whether the parameters governing the grid should be randomized.
Returns:
a 2D (3xHxW) or 3D (4xHxWxD) grid.
"""
if randomize:
self.randomize()
affine_grid = AffineGrid(
rotate_params=self.rotate_params,
shear_params=self.shear_params,
translate_params=self.translate_params,
scale_params=self.scale_params,
device=self.device,
)
_grid: torch.Tensor
_grid, self.affine = affine_grid(spatial_size, grid) # type: ignore
return _grid
def get_transformation_matrix(self) -> Optional[torch.Tensor]:
"""Get the most recently applied transformation matrix"""
return self.affine
class RandDeformGrid(Randomizable, Transform):
"""
Generate random deformation grid.
"""
backend = [TransformBackends.TORCH]
@deprecated_arg(name="as_tensor_output", since="0.8")
def __init__(
self,
spacing: Union[Sequence[float], float],
magnitude_range: Tuple[float, float],
as_tensor_output: bool = True,
device: Optional[torch.device] = None,
) -> None:
"""
Args:
spacing: spacing of the grid in 2D or 3D.
e.g., spacing=(1, 1) indicates pixel-wise deformation in 2D,
spacing=(1, 1, 1) indicates voxel-wise deformation in 3D,
spacing=(2, 2) indicates deformation field defined on every other pixel in 2D.
magnitude_range: the random offsets will be generated from
`uniform[magnitude[0], magnitude[1])`.
device: device to store the output grid data.
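        Example (usage sketch; the spacing and magnitude values are assumed)::

            deform_grid = RandDeformGrid(spacing=(2, 2), magnitude_range=(1.0, 2.0))
            deform_grid.set_random_state(seed=0)
            control_grid = deform_grid(spatial_size=(32, 32))  # randomly offset control grid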
"""
self.spacing = spacing
self.magnitude = magnitude_range
self.rand_mag = 1.0
self.random_offset: np.ndarray
self.device = device
def randomize(self, grid_size: Sequence[int]) -> None:
self.random_offset = self.R.normal(size=([len(grid_size)] + list(grid_size))).astype(np.float32, copy=False)
self.rand_mag = self.R.uniform(self.magnitude[0], self.magnitude[1])
def __call__(self, spatial_size: Sequence[int]) -> torch.Tensor:
"""
Args:
spatial_size: spatial size of the grid.
"""
self.spacing = fall_back_tuple(self.spacing, (1.0,) * len(spatial_size))
control_grid = create_control_grid(spatial_size, self.spacing, device=self.device, backend="torch")
self.randomize(control_grid.shape[1:])
_offset, *_ = convert_to_dst_type(self.rand_mag * self.random_offset, control_grid)
control_grid[: len(spatial_size)] += _offset
return control_grid # type: ignore
class Resample(Transform):
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(
self,
mode: Union[str, int] = GridSampleMode.BILINEAR,
padding_mode: str = GridSamplePadMode.BORDER,
norm_coords: bool = True,
device: Optional[torch.device] = None,
dtype: DtypeLike = np.float64,
) -> None:
"""
        Computes the output image using values from `img` and locations from `grid`, using PyTorch.
        Supports spatially 2D or 3D inputs (num_channels, H, W[, D]).
Args:
mode: {``"bilinear"``, ``"nearest"``} or spline interpolation order 0-5 (integers).
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `USE_COMPILED` is `True`, this argument uses
``"nearest"``, ``"bilinear"``, ``"bicubic"`` to indicate 0, 1, 3 order interpolations.
See also: https://docs.monai.io/en/stable/networks.html#grid-pull (experimental).
When it's an integer, the numpy (cpu tensor)/cupy (cuda tensor) backends will be used
and the value represents the order of the spline interpolation.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `USE_COMPILED` is `True`, this argument uses an integer to represent the padding mode.
See also: https://docs.monai.io/en/stable/networks.html#grid-pull (experimental).
When `mode` is an integer, using numpy/cupy backends, this argument accepts
{'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', 'mirror', 'grid-wrap', 'wrap'}.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
norm_coords: whether to normalize the coordinates from `[-(size-1)/2, (size-1)/2]` to
`[0, size - 1]` (for ``monai/csrc`` implementation) or
`[-1, 1]` (for torch ``grid_sample`` implementation) to be compatible with the underlying
resampling API.
device: device on which the tensor will be allocated.
dtype: data type for resampling computation. Defaults to ``float64`` for best precision.
If ``None``, use the data type of input data. To be compatible with other modules,
the output data type is always `float32`.
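        Example (a minimal sketch resampling with an identity grid; the shape is an
        illustrative assumption and ``create_grid`` comes from ``monai.transforms.utils``)::

            import torch
            img = torch.rand(1, 32, 32)
            grid = create_grid((32, 32), backend="torch")  # identity sampling grid
            out = Resample(mode="bilinear")(img, grid=grid)
            # out closely matches img because the grid is the identity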
"""
self.mode = mode
self.padding_mode = padding_mode
self.norm_coords = norm_coords
self.device = device
self.dtype = dtype
def __call__(
self,
img: torch.Tensor,
grid: Optional[torch.Tensor] = None,
mode: Union[str, int, None] = None,
padding_mode: Optional[str] = None,
dtype: DtypeLike = None,
) -> torch.Tensor:
"""
Args:
img: shape must be (num_channels, H, W[, D]).
grid: shape must be (3, H, W) for 2D or (4, H, W, D) for 3D.
if ``norm_coords`` is True, the grid values must be in `[-(size-1)/2, (size-1)/2]`.
if ``USE_COMPILED=True`` and ``norm_coords=False``, grid values must be in `[0, size-1]`.
if ``USE_COMPILED=False`` and ``norm_coords=False``, grid values must be in `[-1, 1]`.
mode: {``"bilinear"``, ``"nearest"``} or spline interpolation order 0-5 (integers).
Interpolation mode to calculate output values. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `USE_COMPILED` is `True`, this argument uses
``"nearest"``, ``"bilinear"``, ``"bicubic"`` to indicate 0, 1, 3 order interpolations.
See also: https://docs.monai.io/en/stable/networks.html#grid-pull (experimental).
When it's an integer, the numpy (cpu tensor)/cupy (cuda tensor) backends will be used
and the value represents the order of the spline interpolation.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``self.padding_mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `USE_COMPILED` is `True`, this argument uses an integer to represent the padding mode.
See also: https://docs.monai.io/en/stable/networks.html#grid-pull (experimental).
When `mode` is an integer, using numpy/cupy backends, this argument accepts
{'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', 'mirror', 'grid-wrap', 'wrap'}.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
dtype: data type for resampling computation. Defaults to ``self.dtype``.
To be compatible with other modules, the output data type is always `float32`.
See also:
:py:const:`monai.config.USE_COMPILED`
"""
img = convert_to_tensor(img, track_meta=get_track_meta())
if grid is None:
return img
_device = img.device if isinstance(img, torch.Tensor) else self.device
_dtype = dtype or self.dtype or img.dtype
img_t, *_ = convert_data_type(img, torch.Tensor, dtype=_dtype, device=_device)
grid_t, *_ = convert_to_dst_type(grid, img_t, dtype=grid.dtype, wrap_sequence=True)
grid_t = grid_t.clone(memory_format=torch.contiguous_format)
if self.norm_coords:
grid_t[-1] = where(grid_t[-1] != 0, grid_t[-1], 1.0) # type: ignore
sr = min(len(img_t.shape[1:]), 3)
_interp_mode = self.mode if mode is None else mode
_padding_mode = self.padding_mode if padding_mode is None else padding_mode
if look_up_option(str(_interp_mode), SplineMode, default=None) is not None:
self._backend = TransformBackends.NUMPY
else:
self._backend = TransformBackends.TORCH
if USE_COMPILED or self._backend == TransformBackends.NUMPY:
if self.norm_coords:
for i, dim in enumerate(img_t.shape[1 : 1 + sr]):
grid_t[i] = (max(dim, 2) / 2.0 - 0.5 + grid_t[i]) / grid_t[-1:]
grid_t = grid_t[:sr]
if USE_COMPILED and self._backend == TransformBackends.TORCH: # compiled is using torch backend param name
grid_t = moveaxis(grid_t, 0, -1) # type: ignore
bound = 1 if _padding_mode == "reflection" else _padding_mode
if _interp_mode == "bicubic":
interp = 3
elif _interp_mode == "bilinear":
interp = 1
else:
interp = GridSampleMode(_interp_mode) # type: ignore
out = grid_pull(
img_t.unsqueeze(0),
grid_t.unsqueeze(0).to(img_t),
bound=bound,
extrapolate=True,
interpolation=interp,
)[0]
elif self._backend == TransformBackends.NUMPY:
is_cuda = img_t.is_cuda
img_np = (convert_to_cupy if is_cuda else convert_to_numpy)(img_t, wrap_sequence=True)
grid_np, *_ = convert_to_dst_type(grid_t, img_np, wrap_sequence=True)
_map_coord = (cupy_ndi if is_cuda else np_ndi).map_coordinates
out = (cupy if is_cuda else np).stack(
[
_map_coord(c, grid_np, order=int(_interp_mode), mode=look_up_option(_padding_mode, NdimageMode))
for c in img_np
]
)
out = convert_to_dst_type(out, img_t)[0]
else:
if self.norm_coords:
for i, dim in enumerate(img_t.shape[1 : 1 + sr]):
grid_t[i] = 2.0 / (max(2, dim) - 1.0) * grid_t[i] / grid_t[-1:]
index_ordering: List[int] = list(range(sr - 1, -1, -1))
grid_t = moveaxis(grid_t[index_ordering], 0, -1) # type: ignore
out = torch.nn.functional.grid_sample(
img_t.unsqueeze(0),
grid_t.unsqueeze(0).to(img_t),
mode=GridSampleMode(_interp_mode),
padding_mode=GridSamplePadMode(_padding_mode),
align_corners=True,
)[0]
out_val, *_ = convert_to_dst_type(out, dst=img, dtype=np.float32)
return out_val
class Affine(InvertibleTransform):
"""
Transform ``img`` given the affine parameters.
A tutorial is available: https://github.com/Project-MONAI/tutorials/blob/0.6.0/modules/transforms_demo_2d.ipynb.
"""
backend = list(set(AffineGrid.backend) & set(Resample.backend))
@deprecated_arg(name="norm_coords", since="0.8")
def __init__(
self,
rotate_params: Optional[Union[Sequence[float], float]] = None,
shear_params: Optional[Union[Sequence[float], float]] = None,
translate_params: Optional[Union[Sequence[float], float]] = None,
scale_params: Optional[Union[Sequence[float], float]] = None,
affine: Optional[NdarrayOrTensor] = None,
spatial_size: Optional[Union[Sequence[int], int]] = None,
mode: Union[str, int] = GridSampleMode.BILINEAR,
padding_mode: str = GridSamplePadMode.REFLECTION,
normalized: bool = False,
norm_coords: bool = True,
device: Optional[torch.device] = None,
dtype: DtypeLike = np.float32,
image_only: bool = False,
) -> None:
"""
The affine transformations are applied in rotate, shear, translate, scale order.
Args:
rotate_params: a rotation angle in radians, a scalar for 2D image, a tuple of 3 floats for 3D.
Defaults to no rotation.
shear_params: shearing factors for affine matrix, take a 3D affine as example::
[
[1.0, params[0], params[1], 0.0],
[params[2], 1.0, params[3], 0.0],
[params[4], params[5], 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
a tuple of 2 floats for 2D, a tuple of 6 floats for 3D. Defaults to no shearing.
translate_params: a tuple of 2 floats for 2D, a tuple of 3 floats for 3D. Translation is in
pixel/voxel relative to the center of the input image. Defaults to no translation.
scale_params: scale factor for every spatial dims. a tuple of 2 floats for 2D,
a tuple of 3 floats for 3D. Defaults to `1.0`.
affine: If applied, ignore the params (`rotate_params`, etc.) and use the
supplied matrix. Should be square with each side = num of image spatial
dimensions + 1.
spatial_size: output image spatial size.
if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
the transform will use the spatial size of `img`.
if some components of the `spatial_size` are non-positive values, the transform will use the
corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted
to `(32, 64)` if the second spatial dimension size of img is `64`.
mode: {``"bilinear"``, ``"nearest"``} or spline interpolation order 0-5 (integers).
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When it's an integer, the numpy (cpu tensor)/cupy (cuda tensor) backends will be used
and the value represents the order of the spline interpolation.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"reflection"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `mode` is an integer, using numpy/cupy backends, this argument accepts
{'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', 'mirror', 'grid-wrap', 'wrap'}.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
normalized: indicating whether the provided `affine` is defined to include a normalization
transform converting the coordinates from `[-(size-1)/2, (size-1)/2]` (defined in ``create_grid``) to
`[0, size - 1]` or `[-1, 1]` in order to be compatible with the underlying resampling API.
If `normalized=False`, additional coordinate normalization will be applied before resampling.
See also: :py:func:`monai.networks.utils.normalize_transform`.
device: device on which the tensor will be allocated.
dtype: data type for resampling computation. Defaults to ``float32``.
If ``None``, use the data type of input data. To be compatible with other modules,
the output data type is always `float32`.
image_only: if True return only the image volume, otherwise return (image, affine).
.. deprecated:: 0.8.1
``norm_coords`` is deprecated, please use ``normalized`` instead
(the new flag is a negation, i.e., ``norm_coords == not normalized``).
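        Example (usage sketch; the parameter values below are illustrative assumptions)::

            import torch
            affine = Affine(rotate_params=0.2, translate_params=(4, 4), image_only=True)
            out = affine(torch.rand(1, 64, 64))  # returns only the transformed image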
"""
self.affine_grid = AffineGrid(
rotate_params=rotate_params,
shear_params=shear_params,
translate_params=translate_params,
scale_params=scale_params,
affine=affine,
dtype=dtype,
device=device,
)
self.image_only = image_only
self.norm_coord = not normalized
self.resampler = Resample(norm_coords=self.norm_coord, device=device, dtype=dtype)
self.spatial_size = spatial_size
self.mode = mode
self.padding_mode: str = padding_mode
def __call__(
self,
img: torch.Tensor,
spatial_size: Optional[Union[Sequence[int], int]] = None,
mode: Union[str, int, None] = None,
padding_mode: Optional[str] = None,
) -> Union[torch.Tensor, Tuple[torch.Tensor, NdarrayOrTensor]]:
"""
Args:
img: shape must be (num_channels, H, W[, D]),
spatial_size: output image spatial size.
if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
the transform will use the spatial size of `img`.
if `img` has two spatial dimensions, `spatial_size` should have 2 elements [h, w].
if `img` has three spatial dimensions, `spatial_size` should have 3 elements [h, w, d].
mode: {``"bilinear"``, ``"nearest"``} or spline interpolation order 0-5 (integers).
Interpolation mode to calculate output values. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When it's an integer, the numpy (cpu tensor)/cupy (cuda tensor) backends will be used
and the value represents the order of the spline interpolation.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``self.padding_mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `mode` is an integer, using numpy/cupy backends, this argument accepts
{'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', 'mirror', 'grid-wrap', 'wrap'}.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
"""
img = convert_to_tensor(img, track_meta=get_track_meta())
img_size = img.shape[1:]
sp_size = fall_back_tuple(self.spatial_size if spatial_size is None else spatial_size, img_size)
_mode = mode if mode is not None else self.mode
_padding_mode = padding_mode if padding_mode is not None else self.padding_mode
grid, affine = self.affine_grid(spatial_size=sp_size)
out = self.resampler(img, grid=grid, mode=_mode, padding_mode=_padding_mode)
if not isinstance(out, MetaTensor):
return out if self.image_only else (out, affine)
if get_track_meta():
out.meta = img.meta # type: ignore
self.update_meta(out, affine, img_size, sp_size)
self.push_transform(
out, orig_size=img_size, extra_info={"affine": affine, "mode": _mode, "padding_mode": _padding_mode}
)
return out if self.image_only else (out, affine)
@classmethod
def compute_w_affine(cls, affine, mat, img_size, sp_size):
r = len(affine) - 1
mat = to_affine_nd(r, mat)
shift_1 = create_translate(r, [float(d - 1) / 2 for d in img_size[:r]])
shift_2 = create_translate(r, [-float(d - 1) / 2 for d in sp_size[:r]])
mat = shift_1 @ convert_data_type(mat, np.ndarray)[0] @ shift_2
return affine @ convert_to_dst_type(mat, affine)[0]
def update_meta(self, img, mat, img_size, sp_size):
affine = convert_data_type(img.affine, torch.Tensor)[0]
img.affine = Affine.compute_w_affine(affine, mat, img_size, sp_size)
def inverse(self, data: torch.Tensor) -> torch.Tensor:
transform = self.pop_transform(data)
orig_size = transform[TraceKeys.ORIG_SIZE]
# Create inverse transform
fwd_affine = transform[TraceKeys.EXTRA_INFO]["affine"]
mode = transform[TraceKeys.EXTRA_INFO]["mode"]
padding_mode = transform[TraceKeys.EXTRA_INFO]["padding_mode"]
inv_affine = linalg_inv(fwd_affine)
inv_affine = convert_to_dst_type(inv_affine, data, dtype=inv_affine.dtype)[0]
affine_grid = AffineGrid(affine=inv_affine)
grid, _ = affine_grid(orig_size)
# Apply inverse transform
out = self.resampler(data, grid, mode, padding_mode)
if not isinstance(out, MetaTensor):
out = MetaTensor(out)
out.meta = data.meta # type: ignore
self.update_meta(out, inv_affine, data.shape[1:], orig_size)
return out
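# Usage sketch (an illustrative addition, not part of the original module): applying the
# Affine transform above to a single-channel 2D image. The shapes and parameter values
# below are assumptions chosen only for demonstration.
def _example_affine_usage():  # hypothetical helper, not referenced elsewhere
    img = torch.rand(1, 64, 64)  # (num_channels, H, W)
    affine = Affine(rotate_params=np.pi / 4, scale_params=(1.2, 1.2), padding_mode="zeros", image_only=True)
    # resample onto a 96x96 output grid with bilinear interpolation
    return affine(img, spatial_size=(96, 96), mode="bilinear")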
class RandAffine(RandomizableTransform, InvertibleTransform):
"""
Random affine transform.
A tutorial is available: https://github.com/Project-MONAI/tutorials/blob/0.6.0/modules/transforms_demo_2d.ipynb.
"""
backend = Affine.backend
def __init__(
self,
prob: float = 0.1,
rotate_range: RandRange = None,
shear_range: RandRange = None,
translate_range: RandRange = None,
scale_range: RandRange = None,
spatial_size: Optional[Union[Sequence[int], int]] = None,
mode: Union[str, int] = GridSampleMode.BILINEAR,
padding_mode: str = GridSamplePadMode.REFLECTION,
cache_grid: bool = False,
device: Optional[torch.device] = None,
) -> None:
"""
Args:
prob: probability of returning a randomized affine grid.
defaults to 0.1, with 10% chance returns a randomized grid.
rotate_range: angle range in radians. If element `i` is a pair of (min, max) values, then
`uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter
for the `i`th spatial dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used.
This can be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be
in range `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]`
for dim0 and nothing for the remaining dimensions.
            shear_range: shear range with format matching `rotate_range`, it defines the range to randomly select
                shearing factors (a tuple of 2 floats for 2D, a tuple of 6 floats for 3D) for the affine matrix,
take a 3D affine as example::
[
[1.0, params[0], params[1], 0.0],
[params[2], 1.0, params[3], 0.0],
[params[4], params[5], 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
            translate_range: translate range with format matching `rotate_range`, it defines the range to randomly
                select the pixel/voxel magnitude to translate for every spatial dimension.
            scale_range: scaling range with format matching `rotate_range`. It defines the range to randomly select
                the scale factor for every spatial dimension. A value of 1.0 is added to the result.
This allows 0 to correspond to no change (i.e., a scaling of 1.0).
spatial_size: output image spatial size.
if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
the transform will use the spatial size of `img`.
if some components of the `spatial_size` are non-positive values, the transform will use the
corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted
to `(32, 64)` if the second spatial dimension size of img is `64`.
mode: {``"bilinear"``, ``"nearest"``} or spline interpolation order 0-5 (integers).
Interpolation mode to calculate output values. Defaults to ``bilinear``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When it's an integer, the numpy (cpu tensor)/cupy (cuda tensor) backends will be used
and the value represents the order of the spline interpolation.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``reflection``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `mode` is an integer, using numpy/cupy backends, this argument accepts
{'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', 'mirror', 'grid-wrap', 'wrap'}.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
cache_grid: whether to cache the identity sampling grid.
If the spatial size is not dynamically defined by input image, enabling this option could
accelerate the transform.
device: device on which the tensor will be allocated.
See also:
- :py:class:`RandAffineGrid` for the random affine parameters configurations.
- :py:class:`Affine` for the affine transformation parameters configurations.
"""
RandomizableTransform.__init__(self, prob)
self.rand_affine_grid = RandAffineGrid(
rotate_range=rotate_range,
shear_range=shear_range,
translate_range=translate_range,
scale_range=scale_range,
device=device,
)
self.resampler = Resample(device=device)
self.spatial_size = spatial_size
self.cache_grid = cache_grid
self._cached_grid = self._init_identity_cache()
self.mode = mode
self.padding_mode: str = padding_mode
def _init_identity_cache(self):
"""
Create cache of the identity grid if cache_grid=True and spatial_size is known.
"""
if self.spatial_size is None:
if self.cache_grid:
warnings.warn(
"cache_grid=True is not compatible with the dynamic spatial_size, please specify 'spatial_size'."
)
return None
_sp_size = ensure_tuple(self.spatial_size)
_ndim = len(_sp_size)
if _sp_size != fall_back_tuple(_sp_size, [1] * _ndim) or _sp_size != fall_back_tuple(_sp_size, [2] * _ndim):
# dynamic shape because it falls back to different outcomes
if self.cache_grid:
warnings.warn(
"cache_grid=True is not compatible with the dynamic spatial_size "
f"'spatial_size={self.spatial_size}', please specify 'spatial_size'."
)
return None
return create_grid(spatial_size=_sp_size, device=self.rand_affine_grid.device, backend="torch")
def get_identity_grid(self, spatial_size: Sequence[int]):
"""
        Return a cached identity grid if available, otherwise create a new one.
Args:
spatial_size: non-dynamic spatial size
"""
ndim = len(spatial_size)
if spatial_size != fall_back_tuple(spatial_size, [1] * ndim) or spatial_size != fall_back_tuple(
spatial_size, [2] * ndim
):
raise RuntimeError(f"spatial_size should not be dynamic, got {spatial_size}.")
return (
create_grid(spatial_size=spatial_size, device=self.rand_affine_grid.device, backend="torch")
if self._cached_grid is None
else self._cached_grid
)
def set_random_state(
self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
) -> "RandAffine":
self.rand_affine_grid.set_random_state(seed, state)
super().set_random_state(seed, state)
return self
def randomize(self, data: Optional[Any] = None) -> None:
super().randomize(None)
if not self._do_transform:
return None
self.rand_affine_grid.randomize()
def __call__(
self,
img: torch.Tensor,
spatial_size: Optional[Union[Sequence[int], int]] = None,
mode: Union[str, int, None] = None,
padding_mode: Optional[str] = None,
randomize: bool = True,
grid=None,
) -> torch.Tensor:
"""
Args:
img: shape must be (num_channels, H, W[, D]),
spatial_size: output image spatial size.
if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
the transform will use the spatial size of `img`.
if `img` has two spatial dimensions, `spatial_size` should have 2 elements [h, w].
if `img` has three spatial dimensions, `spatial_size` should have 3 elements [h, w, d].
mode: {``"bilinear"``, ``"nearest"``} or spline interpolation order 0-5 (integers).
Interpolation mode to calculate output values. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When it's an integer, the numpy (cpu tensor)/cupy (cuda tensor) backends will be used
and the value represents the order of the spline interpolation.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``self.padding_mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `mode` is an integer, using numpy/cupy backends, this argument accepts
{'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', 'mirror', 'grid-wrap', 'wrap'}.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
randomize: whether to execute `randomize()` function first, default to True.
grid: precomputed grid to be used (mainly to accelerate `RandAffined`).
"""
if randomize:
self.randomize()
# if not doing transform and spatial size doesn't change, nothing to do
# except convert to float and device
sp_size = fall_back_tuple(self.spatial_size if spatial_size is None else spatial_size, img.shape[1:])
do_resampling = self._do_transform or (sp_size != ensure_tuple(img.shape[1:]))
_mode = mode if mode is not None else self.mode
_padding_mode = padding_mode if padding_mode is not None else self.padding_mode
img = convert_to_tensor(img, track_meta=get_track_meta())
if not do_resampling:
out: torch.Tensor = convert_data_type(img, dtype=torch.float32, device=self.resampler.device)[0]
else:
if grid is None:
grid = self.get_identity_grid(sp_size)
if self._do_transform:
grid = self.rand_affine_grid(grid=grid, randomize=randomize)
out = self.resampler(img=img, grid=grid, mode=_mode, padding_mode=_padding_mode)
mat = self.rand_affine_grid.get_transformation_matrix()
out = convert_to_tensor(out, track_meta=get_track_meta())
if get_track_meta():
self.push_transform(
out,
orig_size=img.shape[1:],
extra_info={
"affine": mat,
"mode": _mode,
"padding_mode": _padding_mode,
"do_resampling": do_resampling,
},
)
self.update_meta(out, mat, img.shape[1:], sp_size)
return out
def update_meta(self, img, mat, img_size, sp_size):
affine = convert_data_type(img.affine, torch.Tensor)[0]
img.affine = Affine.compute_w_affine(affine, mat, img_size, sp_size)
def inverse(self, data: torch.Tensor) -> torch.Tensor:
transform = self.pop_transform(data)
# if transform was not performed nothing to do.
if not transform[TraceKeys.EXTRA_INFO]["do_resampling"]:
return data
orig_size = transform[TraceKeys.ORIG_SIZE]
orig_size = fall_back_tuple(orig_size, data.shape[1:])
# Create inverse transform
fwd_affine = transform[TraceKeys.EXTRA_INFO]["affine"]
mode = transform[TraceKeys.EXTRA_INFO]["mode"]
padding_mode = transform[TraceKeys.EXTRA_INFO]["padding_mode"]
inv_affine = linalg_inv(fwd_affine)
inv_affine = convert_to_dst_type(inv_affine, data, dtype=inv_affine.dtype)[0]
affine_grid = AffineGrid(affine=inv_affine)
grid, _ = affine_grid(orig_size)
# Apply inverse transform
out = self.resampler(data, grid, mode, padding_mode)
if not isinstance(out, MetaTensor):
out = MetaTensor(out)
out.meta = data.meta # type: ignore
self.update_meta(out, inv_affine, data.shape[1:], orig_size)
return out
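# Usage sketch (an illustrative addition): a reproducible RandAffine call on a 3D volume,
# using the per-dimension range conventions documented above; all values are assumptions.
def _example_rand_affine_usage():  # hypothetical helper, not referenced elsewhere
    img = torch.rand(1, 64, 64, 32)  # (num_channels, H, W, D)
    rand_affine = RandAffine(
        prob=0.9,
        rotate_range=(np.pi / 12, np.pi / 12, np.pi / 12),
        translate_range=(5, 5, 5),
        scale_range=(0.1, 0.1, 0.1),
        padding_mode="zeros",
    )
    rand_affine.set_random_state(seed=0)  # make the random parameters reproducible
    return rand_affine(img, spatial_size=(64, 64, 32), mode="bilinear")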
class Rand2DElastic(RandomizableTransform):
"""
Random elastic deformation and affine in 2D.
A tutorial is available: https://github.com/Project-MONAI/tutorials/blob/0.6.0/modules/transforms_demo_2d.ipynb.
"""
backend = Resample.backend
def __init__(
self,
spacing: Union[Tuple[float, float], float],
magnitude_range: Tuple[float, float],
prob: float = 0.1,
rotate_range: RandRange = None,
shear_range: RandRange = None,
translate_range: RandRange = None,
scale_range: RandRange = None,
spatial_size: Optional[Union[Tuple[int, int], int]] = None,
mode: Union[str, int] = GridSampleMode.BILINEAR,
padding_mode: str = GridSamplePadMode.REFLECTION,
device: Optional[torch.device] = None,
) -> None:
"""
Args:
            spacing: distance in between the control points.
magnitude_range: the random offsets will be generated from ``uniform[magnitude[0], magnitude[1])``.
prob: probability of returning a randomized elastic transform.
defaults to 0.1, with 10% chance returns a randomized elastic transform,
otherwise returns a ``spatial_size`` centered area extracted from the input image.
rotate_range: angle range in radians. If element `i` is a pair of (min, max) values, then
`uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter
for the `i`th spatial dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used.
This can be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be
in range `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]`
for dim0 and nothing for the remaining dimensions.
            shear_range: shear range with format matching `rotate_range`, it defines the range to randomly select
                shearing factors (a tuple of 2 floats for 2D) for the affine matrix, take a 2D affine as example::
[
[1.0, params[0], 0.0],
[params[1], 1.0, 0.0],
[0.0, 0.0, 1.0],
]
            translate_range: translate range with format matching `rotate_range`, it defines the range to randomly
                select the pixel magnitude to translate for every spatial dimension.
            scale_range: scaling range with format matching `rotate_range`. It defines the range to randomly select
                the scale factor for every spatial dimension. A value of 1.0 is added to the result.
This allows 0 to correspond to no change (i.e., a scaling of 1.0).
spatial_size: specifying output image spatial size [h, w].
if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
the transform will use the spatial size of `img`.
if some components of the `spatial_size` are non-positive values, the transform will use the
corresponding components of img size. For example, `spatial_size=(32, -1)` will be adapted
to `(32, 64)` if the second spatial dimension size of img is `64`.
mode: {``"bilinear"``, ``"nearest"``} or spline interpolation order 0-5 (integers).
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When it's an integer, the numpy (cpu tensor)/cupy (cuda tensor) backends will be used
and the value represents the order of the spline interpolation.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"reflection"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `mode` is an integer, using numpy/cupy backends, this argument accepts
{'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', 'mirror', 'grid-wrap', 'wrap'}.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
device: device on which the tensor will be allocated.
See also:
- :py:class:`RandAffineGrid` for the random affine parameters configurations.
- :py:class:`Affine` for the affine transformation parameters configurations.
"""
RandomizableTransform.__init__(self, prob)
self.deform_grid = RandDeformGrid(spacing=spacing, magnitude_range=magnitude_range, device=device)
self.rand_affine_grid = RandAffineGrid(
rotate_range=rotate_range,
shear_range=shear_range,
translate_range=translate_range,
scale_range=scale_range,
device=device,
)
self.resampler = Resample(device=device)
self.device = device
self.spatial_size = spatial_size
self.mode = mode
self.padding_mode: str = padding_mode
def set_random_state(
self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
) -> "Rand2DElastic":
self.deform_grid.set_random_state(seed, state)
self.rand_affine_grid.set_random_state(seed, state)
super().set_random_state(seed, state)
return self
def set_device(self, device):
self.deform_grid.device = device
self.rand_affine_grid.device = device
self.resampler.device = device
self.device = device
def randomize(self, spatial_size: Sequence[int]) -> None:
super().randomize(None)
if not self._do_transform:
return None
self.deform_grid.randomize(spatial_size)
self.rand_affine_grid.randomize()
def __call__(
self,
img: torch.Tensor,
spatial_size: Optional[Union[Tuple[int, int], int]] = None,
mode: Union[str, int, None] = None,
padding_mode: Optional[str] = None,
randomize: bool = True,
) -> torch.Tensor:
"""
Args:
img: shape must be (num_channels, H, W),
spatial_size: specifying output image spatial size [h, w].
if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
the transform will use the spatial size of `img`.
mode: {``"bilinear"``, ``"nearest"``} or spline interpolation order 0-5 (integers).
Interpolation mode to calculate output values. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When it's an integer, the numpy (cpu tensor)/cupy (cuda tensor) backends will be used
and the value represents the order of the spline interpolation.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``self.padding_mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `mode` is an integer, using numpy/cupy backends, this argument accepts
{'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', 'mirror', 'grid-wrap', 'wrap'}.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
randomize: whether to execute `randomize()` function first, default to True.
"""
sp_size = fall_back_tuple(self.spatial_size if spatial_size is None else spatial_size, img.shape[1:])
if randomize:
self.randomize(spatial_size=sp_size)
if self._do_transform:
grid = self.deform_grid(spatial_size=sp_size)
grid = self.rand_affine_grid(grid=grid)
grid = torch.nn.functional.interpolate(
recompute_scale_factor=True,
input=grid.unsqueeze(0),
scale_factor=list(ensure_tuple(self.deform_grid.spacing)),
mode=InterpolateMode.BICUBIC.value,
align_corners=False,
)
grid = CenterSpatialCrop(roi_size=sp_size)(grid[0])
else:
_device = img.device if isinstance(img, torch.Tensor) else self.device
grid = create_grid(spatial_size=sp_size, device=_device, backend="torch")
out: torch.Tensor = self.resampler(
img,
grid,
mode=mode if mode is not None else self.mode,
padding_mode=padding_mode if padding_mode is not None else self.padding_mode,
)
return out
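# Usage sketch (an illustrative addition): 2D elastic deformation combined with a small
# random rotation, following the argument conventions documented above; values are assumptions.
def _example_rand_2d_elastic_usage():  # hypothetical helper, not referenced elsewhere
    img = torch.rand(1, 128, 128)  # (num_channels, H, W)
    elastic = Rand2DElastic(
        spacing=(20, 20),  # control-point spacing of the deformation grid
        magnitude_range=(1, 2),
        prob=1.0,  # always deform, for demonstration
        rotate_range=(np.pi / 36,),
        padding_mode="zeros",
    )
    return elastic(img, spatial_size=(128, 128), mode="bilinear")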
class Rand3DElastic(RandomizableTransform):
"""
Random elastic deformation and affine in 3D.
A tutorial is available: https://github.com/Project-MONAI/tutorials/blob/0.6.0/modules/transforms_demo_2d.ipynb.
"""
backend = Resample.backend
def __init__(
self,
sigma_range: Tuple[float, float],
magnitude_range: Tuple[float, float],
prob: float = 0.1,
rotate_range: RandRange = None,
shear_range: RandRange = None,
translate_range: RandRange = None,
scale_range: RandRange = None,
spatial_size: Optional[Union[Tuple[int, int, int], int]] = None,
mode: Union[str, int] = GridSampleMode.BILINEAR,
padding_mode: str = GridSamplePadMode.REFLECTION,
device: Optional[torch.device] = None,
) -> None:
"""
Args:
sigma_range: a Gaussian kernel with standard deviation sampled from
``uniform[sigma_range[0], sigma_range[1])`` will be used to smooth the random offset grid.
magnitude_range: the random offsets on the grid will be generated from
``uniform[magnitude[0], magnitude[1])``.
prob: probability of returning a randomized elastic transform.
defaults to 0.1, with 10% chance returns a randomized elastic transform,
otherwise returns a ``spatial_size`` centered area extracted from the input image.
rotate_range: angle range in radians. If element `i` is a pair of (min, max) values, then
`uniform[-rotate_range[i][0], rotate_range[i][1])` will be used to generate the rotation parameter
for the `i`th spatial dimension. If not, `uniform[-rotate_range[i], rotate_range[i])` will be used.
This can be altered on a per-dimension basis. E.g., `((0,3), 1, ...)`: for dim0, rotation will be
in range `[0, 3]`, and for dim1 `[-1, 1]` will be used. Setting a single value will use `[-x, x]`
for dim0 and nothing for the remaining dimensions.
            shear_range: shear range with format matching `rotate_range`, it defines the range to randomly select
                shearing factors (a tuple of 6 floats for 3D) for the affine matrix, take a 3D affine as example::
[
[1.0, params[0], params[1], 0.0],
[params[2], 1.0, params[3], 0.0],
[params[4], params[5], 1.0, 0.0],
[0.0, 0.0, 0.0, 1.0],
]
            translate_range: translate range with format matching `rotate_range`, it defines the range to randomly
                select the voxel magnitude to translate for every spatial dimension.
            scale_range: scaling range with format matching `rotate_range`. It defines the range to randomly select
                the scale factor for every spatial dimension. A value of 1.0 is added to the result.
This allows 0 to correspond to no change (i.e., a scaling of 1.0).
spatial_size: specifying output image spatial size [h, w, d].
if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
the transform will use the spatial size of `img`.
if some components of the `spatial_size` are non-positive values, the transform will use the
corresponding components of img size. For example, `spatial_size=(32, 32, -1)` will be adapted
to `(32, 32, 64)` if the third spatial dimension size of img is `64`.
mode: {``"bilinear"``, ``"nearest"``} or spline interpolation order 0-5 (integers).
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When it's an integer, the numpy (cpu tensor)/cupy (cuda tensor) backends will be used
and the value represents the order of the spline interpolation.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"reflection"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `mode` is an integer, using numpy/cupy backends, this argument accepts
{'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', 'mirror', 'grid-wrap', 'wrap'}.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
device: device on which the tensor will be allocated.
See also:
- :py:class:`RandAffineGrid` for the random affine parameters configurations.
- :py:class:`Affine` for the affine transformation parameters configurations.
"""
RandomizableTransform.__init__(self, prob)
self.rand_affine_grid = RandAffineGrid(
rotate_range=rotate_range,
shear_range=shear_range,
translate_range=translate_range,
scale_range=scale_range,
device=device,
)
self.resampler = Resample(device=device)
self.sigma_range = sigma_range
self.magnitude_range = magnitude_range
self.spatial_size = spatial_size
self.mode = mode
self.padding_mode: str = padding_mode
self.device = device
self.rand_offset: np.ndarray
self.magnitude = 1.0
self.sigma = 1.0
def set_random_state(
self, seed: Optional[int] = None, state: Optional[np.random.RandomState] = None
) -> "Rand3DElastic":
self.rand_affine_grid.set_random_state(seed, state)
super().set_random_state(seed, state)
return self
def set_device(self, device):
self.rand_affine_grid.device = device
self.resampler.device = device
self.device = device
def randomize(self, grid_size: Sequence[int]) -> None:
super().randomize(None)
if not self._do_transform:
return None
self.rand_offset = self.R.uniform(-1.0, 1.0, [3] + list(grid_size)).astype(np.float32, copy=False)
self.magnitude = self.R.uniform(self.magnitude_range[0], self.magnitude_range[1])
self.sigma = self.R.uniform(self.sigma_range[0], self.sigma_range[1])
self.rand_affine_grid.randomize()
def __call__(
self,
img: torch.Tensor,
spatial_size: Optional[Union[Tuple[int, int, int], int]] = None,
mode: Union[str, int, None] = None,
padding_mode: Optional[str] = None,
randomize: bool = True,
) -> torch.Tensor:
"""
Args:
img: shape must be (num_channels, H, W, D),
            spatial_size: specifying the 3D output image spatial size [h, w, d].
if `spatial_size` and `self.spatial_size` are not defined, or smaller than 1,
the transform will use the spatial size of `img`.
mode: {``"bilinear"``, ``"nearest"``} or spline interpolation order 0-5 (integers).
Interpolation mode to calculate output values. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When it's an integer, the numpy (cpu tensor)/cupy (cuda tensor) backends will be used
and the value represents the order of the spline interpolation.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``self.padding_mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `mode` is an integer, using numpy/cupy backends, this argument accepts
{'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', 'mirror', 'grid-wrap', 'wrap'}.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
randomize: whether to execute `randomize()` function first, default to True.
"""
sp_size = fall_back_tuple(self.spatial_size if spatial_size is None else spatial_size, img.shape[1:])
if randomize:
self.randomize(grid_size=sp_size)
_device = img.device if isinstance(img, torch.Tensor) else self.device
grid = create_grid(spatial_size=sp_size, device=_device, backend="torch")
if self._do_transform:
if self.rand_offset is None:
raise RuntimeError("rand_offset is not initialized.")
gaussian = GaussianFilter(3, self.sigma, 3.0).to(device=_device)
offset = torch.as_tensor(self.rand_offset, device=_device).unsqueeze(0)
grid[:3] += gaussian(offset)[0] * self.magnitude
grid = self.rand_affine_grid(grid=grid)
out: torch.Tensor = self.resampler(
img,
grid, # type: ignore
mode=mode if mode is not None else self.mode,
padding_mode=padding_mode if padding_mode is not None else self.padding_mode,
)
return out
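# Usage sketch (an illustrative addition): Rand3DElastic smooths a random offset grid with a
# Gaussian kernel before resampling; the sigma/magnitude values below are assumptions.
def _example_rand_3d_elastic_usage():  # hypothetical helper, not referenced elsewhere
    img = torch.rand(1, 64, 64, 64)  # (num_channels, H, W, D)
    elastic = Rand3DElastic(
        sigma_range=(5, 8),
        magnitude_range=(100, 200),
        prob=1.0,  # always deform, for demonstration
        padding_mode="zeros",
    )
    elastic.set_random_state(seed=0)
    return elastic(img, spatial_size=(64, 64, 64), mode="bilinear")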
class GridDistortion(Transform):
backend = [TransformBackends.TORCH]
def __init__(
self,
num_cells: Union[Tuple[int], int],
distort_steps: Sequence[Sequence[float]],
mode: Union[str, int] = GridSampleMode.BILINEAR,
padding_mode: str = GridSamplePadMode.BORDER,
device: Optional[torch.device] = None,
) -> None:
"""
Grid distortion transform. Refer to:
https://github.com/albumentations-team/albumentations/blob/master/albumentations/augmentations/transforms.py
Args:
num_cells: number of grid cells on each dimension.
            distort_steps: This argument is a list of tuples, where each tuple contains the distort steps of the
                corresponding dimension (in the order of H, W[, D]). The length of each tuple equals `num_cells + 1`.
                Each value in the tuple represents the distort step of the related cell.
mode: {``"bilinear"``, ``"nearest"``} or spline interpolation order 0-5 (integers).
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When it's an integer, the numpy (cpu tensor)/cupy (cuda tensor) backends will be used
and the value represents the order of the spline interpolation.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `mode` is an integer, using numpy/cupy backends, this argument accepts
{'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', 'mirror', 'grid-wrap', 'wrap'}.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
device: device on which the tensor will be allocated.
"""
self.resampler = Resample(mode=mode, padding_mode=padding_mode, device=device)
self.num_cells = num_cells
self.distort_steps = distort_steps
self.device = device
def __call__(
self,
img: torch.Tensor,
distort_steps: Optional[Sequence[Sequence]] = None,
mode: Optional[str] = None,
padding_mode: Optional[str] = None,
) -> torch.Tensor:
"""
Args:
img: shape must be (num_channels, H, W[, D]).
            distort_steps: This argument is a list of tuples, where each tuple contains the distort steps of the
                corresponding dimension (in the order of H, W[, D]). The length of each tuple equals `num_cells + 1`.
                Each value in the tuple represents the distort step of the related cell.
mode: {``"bilinear"``, ``"nearest"``} or spline interpolation order 0-5 (integers).
Interpolation mode to calculate output values. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When it's an integer, the numpy (cpu tensor)/cupy (cuda tensor) backends will be used
and the value represents the order of the spline interpolation.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``self.padding_mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `mode` is an integer, using numpy/cupy backends, this argument accepts
{'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', 'mirror', 'grid-wrap', 'wrap'}.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
"""
distort_steps = self.distort_steps if distort_steps is None else distort_steps
if len(img.shape) != len(distort_steps) + 1:
raise ValueError("the spatial size of `img` does not match with the length of `distort_steps`")
all_ranges = []
num_cells = ensure_tuple_rep(self.num_cells, len(img.shape) - 1)
for dim_idx, dim_size in enumerate(img.shape[1:]):
dim_distort_steps = distort_steps[dim_idx]
ranges = torch.zeros(dim_size, dtype=torch.float32)
cell_size = dim_size // num_cells[dim_idx]
prev = 0
for idx in range(num_cells[dim_idx] + 1):
start = int(idx * cell_size)
end = start + cell_size
if end > dim_size:
end = dim_size
cur = dim_size
else:
cur = prev + cell_size * dim_distort_steps[idx]
ranges[start:end] = torch.linspace(prev, cur, end - start)
prev = cur
ranges = ranges - (dim_size - 1.0) / 2.0
all_ranges.append(ranges)
coords = meshgrid_ij(*all_ranges)
grid = torch.stack([*coords, torch.ones_like(coords[0])])
return self.resampler(img, grid=grid, mode=mode, padding_mode=padding_mode)
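# Usage sketch (an illustrative addition): a deterministic grid distortion where each spatial
# dimension gets `num_cells + 1` distort steps; the step values below are assumptions.
def _example_grid_distortion_usage():  # hypothetical helper, not referenced elsewhere
    img = torch.rand(1, 120, 120)  # (num_channels, H, W)
    num_cells = 3
    distort_steps = [(1.0, 1.1, 0.9, 1.0)] * 2  # one tuple per spatial dim, num_cells + 1 values each
    distortion = GridDistortion(num_cells=num_cells, distort_steps=distort_steps, padding_mode="zeros")
    return distortion(img)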
class RandGridDistortion(RandomizableTransform):
backend = [TransformBackends.TORCH]
def __init__(
self,
num_cells: Union[Tuple[int], int] = 5,
prob: float = 0.1,
distort_limit: Union[Tuple[float, float], float] = (-0.03, 0.03),
mode: Union[str, int] = GridSampleMode.BILINEAR,
padding_mode: str = GridSamplePadMode.BORDER,
device: Optional[torch.device] = None,
) -> None:
"""
Random grid distortion transform. Refer to:
https://github.com/albumentations-team/albumentations/blob/master/albumentations/augmentations/transforms.py
Args:
num_cells: number of grid cells on each dimension.
prob: probability of returning a randomized grid distortion transform. Defaults to 0.1.
distort_limit: range to randomly distort.
If single number, distort_limit is picked from (-distort_limit, distort_limit).
Defaults to (-0.03, 0.03).
mode: {``"bilinear"``, ``"nearest"``} or spline interpolation order 0-5 (integers).
Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When it's an integer, the numpy (cpu tensor)/cupy (cuda tensor) backends will be used
and the value represents the order of the spline interpolation.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``"border"``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `mode` is an integer, using numpy/cupy backends, this argument accepts
{'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', 'mirror', 'grid-wrap', 'wrap'}.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
device: device on which the tensor will be allocated.
"""
RandomizableTransform.__init__(self, prob)
self.num_cells = num_cells
if isinstance(distort_limit, (int, float)):
self.distort_limit = (min(-distort_limit, distort_limit), max(-distort_limit, distort_limit))
else:
self.distort_limit = (min(distort_limit), max(distort_limit))
self.distort_steps: Sequence[Sequence[float]] = ((1.0,),)
self.grid_distortion = GridDistortion(
num_cells=num_cells, distort_steps=self.distort_steps, mode=mode, padding_mode=padding_mode, device=device
)
def randomize(self, spatial_shape: Sequence[int]) -> None:
super().randomize(None)
if not self._do_transform:
return
self.distort_steps = tuple(
tuple(1.0 + self.R.uniform(low=self.distort_limit[0], high=self.distort_limit[1], size=n_cells + 1))
for n_cells in ensure_tuple_rep(self.num_cells, len(spatial_shape))
)
def __call__(
self, img: torch.Tensor, mode: Optional[str] = None, padding_mode: Optional[str] = None, randomize: bool = True
) -> torch.Tensor:
"""
Args:
img: shape must be (num_channels, H, W[, D]).
mode: {``"bilinear"``, ``"nearest"``} or spline interpolation order 0-5 (integers).
Interpolation mode to calculate output values. Defaults to ``self.mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When it's an integer, the numpy (cpu tensor)/cupy (cuda tensor) backends will be used
and the value represents the order of the spline interpolation.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values. Defaults to ``self.padding_mode``.
See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.grid_sample.html
When `mode` is an integer, using numpy/cupy backends, this argument accepts
{'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', 'mirror', 'grid-wrap', 'wrap'}.
See also: https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html
randomize: whether to shuffle the random factors using `randomize()`, default to True.
"""
if randomize:
self.randomize(img.shape[1:])
if not self._do_transform:
return convert_to_tensor(img, track_meta=get_track_meta()) # type: ignore
return self.grid_distortion(img, distort_steps=self.distort_steps, mode=mode, padding_mode=padding_mode)
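# Usage sketch (an illustrative addition): the randomized counterpart draws distort steps from
# `1.0 + uniform(distort_limit)` per grid node; the limits below are assumptions.
def _example_rand_grid_distortion_usage():  # hypothetical helper, not referenced elsewhere
    img = torch.rand(1, 120, 120)  # (num_channels, H, W)
    rand_distortion = RandGridDistortion(num_cells=4, prob=1.0, distort_limit=(-0.05, 0.05), padding_mode="zeros")
    rand_distortion.set_random_state(seed=0)
    return rand_distortion(img)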
class GridSplit(Transform):
"""
Split the image into patches based on the provided grid in 2D.
Args:
        grid: a tuple defining the shape of the grid upon which the image is split. Defaults to (2, 2).
size: a tuple or an integer that defines the output patch sizes.
If it's an integer, the value will be repeated for each dimension.
The default is None, where the patch size will be inferred from the grid shape.
    Example:
        Given an image (torch.Tensor or numpy.ndarray) with size of (3, 10, 10) and a grid of (2, 2),
        it will return a list of 4 patches, each a Tensor or array with the size of (3, 5, 5).
        Here, if the `size` is provided, each returned patch will have the shape (3, size, size).
    Note: This transform currently supports only images with two spatial dimensions.
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(self, grid: Tuple[int, int] = (2, 2), size: Optional[Union[int, Tuple[int, int]]] = None):
# Grid size
self.grid = grid
# Patch size
self.size = None if size is None else ensure_tuple_rep(size, len(self.grid))
def __call__(
self, image: NdarrayOrTensor, size: Optional[Union[int, Tuple[int, int], np.ndarray]] = None
) -> List[NdarrayOrTensor]:
input_size = self.size if size is None else ensure_tuple_rep(size, len(self.grid))
if self.grid == (1, 1) and input_size is None:
return [image]
split_size, steps = self._get_params(image.shape[1:], input_size)
patches: List[NdarrayOrTensor]
as_strided_func: Callable
if isinstance(image, torch.Tensor):
as_strided_func = torch.as_strided
c_stride, x_stride, y_stride = image.stride() # type: ignore
elif isinstance(image, np.ndarray):
as_strided_func = np.lib.stride_tricks.as_strided
c_stride, x_stride, y_stride = image.strides
else:
raise ValueError(f"Input type [{type(image)}] is not supported.")
x_step, y_step = steps
n_channels = image.shape[0]
strided_image = as_strided_func(
image,
(*self.grid, n_channels, split_size[0], split_size[1]),
(x_stride * x_step, y_stride * y_step, c_stride, x_stride, y_stride),
)
# Flatten the first two dimensions
strided_image = strided_image.reshape(-1, *strided_image.shape[2:])
# Make a list of contiguous patches
if isinstance(image, torch.Tensor):
patches = [p.contiguous() for p in strided_image]
elif isinstance(image, np.ndarray):
patches = [np.ascontiguousarray(p) for p in strided_image]
return patches
def _get_params(
self, image_size: Union[Sequence[int], np.ndarray], size: Optional[Union[Sequence[int], np.ndarray]] = None
):
"""
        Calculate the patch size and the step required for splitting the image.
        Args:
            image_size: the spatial size of the input image.
            size: the size of each output patch; if None, it is inferred from `image_size` and `self.grid`.
"""
if size is None:
# infer each sub-image size from the image size and the grid
size = tuple(image_size[i] // self.grid[i] for i in range(len(self.grid)))
if any(size[i] > image_size[i] for i in range(len(self.grid))):
raise ValueError(f"The image size ({image_size})is smaller than the requested split size ({size})")
steps = tuple(
(image_size[i] - size[i]) // (self.grid[i] - 1) if self.grid[i] > 1 else image_size[i]
for i in range(len(self.grid))
)
return size, steps
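# Usage sketch (an illustrative addition): splitting a (3, 10, 10) image with a 2x2 grid
# yields a list of four (3, 5, 5) patches, as described in the class docstring.
def _example_grid_split_usage():  # hypothetical helper, not referenced elsewhere
    img = torch.rand(3, 10, 10)  # (num_channels, H, W)
    splitter = GridSplit(grid=(2, 2))
    return splitter(img)  # list of 4 patches, each of shape (3, 5, 5)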
class GridPatch(Transform):
"""
Extract all the patches sweeping the entire image in a row-major sliding-window manner with possible overlaps.
It can sort the patches and return all or a subset of them.
Args:
patch_size: size of patches to generate slices for, 0 or None selects whole dimension
offset: offset of starting position in the array, default is 0 for each dimension.
num_patches: number of patches to return. Defaults to None, which returns all the available patches.
            If the requested number of patches is greater than the number of available patches, padding will be applied.
overlap: the amount of overlap of neighboring patches in each dimension (a value between 0.0 and 1.0).
If only one float number is given, it will be applied to all dimensions. Defaults to 0.0.
        sort_fn: when `num_patches` is provided, it determines whether to keep patches with the highest values (`"max"`),
            the lowest values (`"min"`), or in their default order (`None`). Defaults to None.
        threshold: a value to keep only the patches whose sum of intensities is less than the threshold.
Defaults to no filtering.
pad_mode: refer to NumpyPadMode and PytorchPadMode. If None, no padding will be applied. Defaults to ``"constant"``.
pad_kwargs: other arguments for the `np.pad` or `torch.pad` function.
Returns:
MetaTensor: A MetaTensor consisting of a batch of all the patches with associated metadata
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(
self,
patch_size: Sequence[int],
offset: Optional[Sequence[int]] = None,
num_patches: Optional[int] = None,
overlap: Union[Sequence[float], float] = 0.0,
sort_fn: Optional[str] = None,
threshold: Optional[float] = None,
pad_mode: str = PytorchPadMode.CONSTANT,
**pad_kwargs,
):
self.patch_size = ensure_tuple(patch_size)
self.offset = ensure_tuple(offset) if offset else (0,) * len(self.patch_size)
self.pad_mode: Optional[NumpyPadMode] = convert_pad_mode(dst=np.zeros(1), mode=pad_mode) if pad_mode else None
self.pad_kwargs = pad_kwargs
self.overlap = overlap
self.num_patches = num_patches
self.sort_fn = sort_fn.lower() if sort_fn else None
self.threshold = threshold
def filter_threshold(self, image_np: np.ndarray, locations: np.ndarray):
"""
Filter the patches and their locations according to a threshold
Args:
image_np: a numpy.ndarray representing a stack of patches
locations: a numpy.ndarray representing the stack of location of each patch
"""
if self.threshold is not None:
n_dims = len(image_np.shape)
idx = np.argwhere(image_np.sum(axis=tuple(range(1, n_dims))) < self.threshold).reshape(-1)
image_np = image_np[idx]
locations = locations[idx]
return image_np, locations
def filter_count(self, image_np: np.ndarray, locations: np.ndarray):
"""
Sort the patches based on the sum of their intensity, and just keep `self.num_patches` of them.
Args:
image_np: a numpy.ndarray representing a stack of patches
locations: a numpy.ndarray representing the stack of location of each patch
"""
if self.sort_fn is None:
image_np = image_np[: self.num_patches]
locations = locations[: self.num_patches]
elif self.num_patches is not None:
n_dims = len(image_np.shape)
if self.sort_fn == GridPatchSort.MIN:
idx = np.argsort(image_np.sum(axis=tuple(range(1, n_dims))))
elif self.sort_fn == GridPatchSort.MAX:
idx = np.argsort(-image_np.sum(axis=tuple(range(1, n_dims))))
else:
raise ValueError(f'`sort_fn` should be either "min", "max" or None! {self.sort_fn} provided!')
idx = idx[: self.num_patches]
image_np = image_np[idx]
locations = locations[idx]
return image_np, locations
def __call__(self, array: NdarrayOrTensor):
# create the patch iterator which sweeps the image row-by-row
array_np, *_ = convert_data_type(array, np.ndarray)
patch_iterator = iter_patch(
array_np,
patch_size=(None,) + self.patch_size, # expand to have the channel dim
start_pos=(0,) + self.offset, # expand to have the channel dim
overlap=self.overlap,
copy_back=False,
mode=self.pad_mode,
**self.pad_kwargs,
)
patches = list(zip(*patch_iterator))
patched_image = np.array(patches[0])
locations = np.array(patches[1])[:, 1:, 0] # only keep the starting location
# Filter patches
if self.num_patches:
patched_image, locations = self.filter_count(patched_image, locations)
elif self.threshold:
patched_image, locations = self.filter_threshold(patched_image, locations)
# Pad the patch list to have the requested number of patches
if self.num_patches:
padding = self.num_patches - len(patched_image)
if padding > 0:
patched_image = np.pad(
patched_image,
[[0, padding], [0, 0]] + [[0, 0]] * len(self.patch_size),
constant_values=self.pad_kwargs.get("constant_values", 0),
)
locations = np.pad(locations, [[0, padding], [0, 0]], constant_values=0)
# Convert to MetaTensor
metadata = array.meta if isinstance(array, MetaTensor) else MetaTensor.get_default_meta()
metadata[WSIPatchKeys.LOCATION] = locations.T
metadata[WSIPatchKeys.COUNT] = len(locations)
metadata["spatial_shape"] = np.tile(np.array(self.patch_size), (len(locations), 1)).T
output = MetaTensor(x=patched_image, meta=metadata)
output.is_batch = True
return output
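# Usage sketch (an illustrative addition): extracting all non-overlapping 8x8 patches from a
# 16x16 single-channel image; the result is a batched MetaTensor with per-patch locations.
def _example_grid_patch_usage():  # hypothetical helper, not referenced elsewhere
    img = torch.rand(1, 16, 16)  # (num_channels, H, W)
    patcher = GridPatch(patch_size=(8, 8))
    return patcher(img)  # MetaTensor of shape (4, 1, 8, 8) with location metadata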
class RandGridPatch(GridPatch, RandomizableTransform):
"""
Extract all the patches sweeping the entire image in a row-major sliding-window manner with possible overlaps,
and with random offset for the minimal corner of the image, (0,0) for 2D and (0,0,0) for 3D.
It can sort the patches and return all or a subset of them.
Args:
patch_size: size of patches to generate slices for, 0 or None selects whole dimension
min_offset: the minimum range of offset to be selected randomly. Defaults to 0.
max_offset: the maximum range of offset to be selected randomly.
Defaults to image size modulo patch size.
num_patches: number of patches to return. Defaults to None, which returns all the available patches.
overlap: the amount of overlap of neighboring patches in each dimension (a value between 0.0 and 1.0).
If only one float number is given, it will be applied to all dimensions. Defaults to 0.0.
        sort_fn: when `num_patches` is provided, it determines whether to keep patches with the highest values (`"max"`),
            the lowest values (`"min"`), or in their default order (`None`). Defaults to None.
        threshold: a value to keep only the patches whose sum of intensities is less than the threshold.
Defaults to no filtering.
pad_mode: refer to NumpyPadMode and PytorchPadMode. If None, no padding will be applied. Defaults to ``"constant"``.
pad_kwargs: other arguments for the `np.pad` or `torch.pad` function.
Returns:
MetaTensor: A MetaTensor consisting of a batch of all the patches with associated metadata
"""
backend = [TransformBackends.TORCH, TransformBackends.NUMPY]
def __init__(
self,
patch_size: Sequence[int],
min_offset: Optional[Union[Sequence[int], int]] = None,
max_offset: Optional[Union[Sequence[int], int]] = None,
num_patches: Optional[int] = None,
overlap: Union[Sequence[float], float] = 0.0,
sort_fn: Optional[str] = None,
threshold: Optional[float] = None,
pad_mode: str = PytorchPadMode.CONSTANT,
**pad_kwargs,
):
super().__init__(
patch_size=patch_size,
offset=(),
num_patches=num_patches,
overlap=overlap,
sort_fn=sort_fn,
threshold=threshold,
pad_mode=pad_mode,
**pad_kwargs,
)
self.min_offset = min_offset
self.max_offset = max_offset
def randomize(self, array):
if self.min_offset is None:
min_offset = (0,) * len(self.patch_size)
else:
min_offset = ensure_tuple_rep(self.min_offset, len(self.patch_size))
if self.max_offset is None:
max_offset = tuple(s % p for s, p in zip(array.shape[1:], self.patch_size))
else:
max_offset = ensure_tuple_rep(self.max_offset, len(self.patch_size))
self.offset = tuple(self.R.randint(low=low, high=high + 1) for low, high in zip(min_offset, max_offset))
def __call__(self, array: NdarrayOrTensor, randomize: bool = True):
if randomize:
self.randomize(array)
return super().__call__(array)
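# Usage sketch (an illustrative addition): a random starting offset plus `sort_fn="max"` keeps
# the `num_patches` patches with the highest intensity sums; the values below are assumptions.
def _example_rand_grid_patch_usage():  # hypothetical helper, not referenced elsewhere
    img = torch.rand(1, 20, 20)  # (num_channels, H, W)
    patcher = RandGridPatch(patch_size=(8, 8), num_patches=3, sort_fn="max")
    patcher.set_random_state(seed=0)
    return patcher(img)  # MetaTensor containing the 3 highest-sum patches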
| {
"content_hash": "9a7fd800ceee13e996687f66073a05c2",
"timestamp": "",
"source": "github",
"line_count": 3368,
"max_line_length": 129,
"avg_line_length": 51.39489311163896,
"alnum_prop": 0.6085743336144843,
"repo_name": "Project-MONAI/MONAI",
"id": "dcddefce3a4724b1ef1e521b7762c269d4f29e86",
"size": "173671",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "monai/transforms/spatial/array.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15956"
},
{
"name": "C++",
"bytes": "189648"
},
{
"name": "Cuda",
"bytes": "154905"
},
{
"name": "Dockerfile",
"bytes": "2454"
},
{
"name": "Python",
"bytes": "7209898"
},
{
"name": "Shell",
"bytes": "20587"
}
],
"symlink_target": ""
} |
import warnings
import numpy as np
from petram.mfem_config import use_parallel
if use_parallel:
from petram.helper.mpi_recipes import *
import mfem.par as mfem
else:
import mfem.ser as mfem
def make_matrix(x, y, z):
pass
def do_findpoints(mesh, *args):
    sdim = mesh.SpaceDimension()
    shape = args[0].shape
    size = len(args[0].flatten())
    ptx = np.vstack([t.flatten() for t in args]).transpose().flatten()
    ptx2 = mfem.Vector(ptx)
    point_mat = mfem.DenseMatrix(ptx2.GetData(), size, sdim)
    elem_id = mfem.intArray()
    ips = mfem.IntegrationPointArray()
    num_found = mesh.FindPoints(point_mat, elem_id, ips, True)
    elem_id = np.array(elem_id.ToList())
    return num_found, elem_id, ips
def eval_at_points(gf, *args):
    args = [np.atleast_1d(np.array(t, copy=False)) for t in args]
    mesh = gf.FESpace().Mesh()
    num_found, elem_id, ips = findpoints(mesh, *args)
    # evaluate the (scalar) grid function at each located element/integration point;
    # assumes every query point was found inside the mesh
    values = np.array([gf.GetValue(int(e), ips[i]) for i, e in enumerate(elem_id)])
    return values.reshape(args[0].shape)
def findpoints(mesh, *args):
'''
*args : x, y or x, y, z
'''
sdim = mesh.SpaceDimension()
if len(args) != 3 and sdim == 3:
assert False, "SpaceDimension = 3, pass x, y, z"
    elif len(args) != 2 and sdim == 2:
        assert False, "SpaceDimension = 2, pass x, y"
    elif len(args) != 1 and sdim == 1:
        assert False, "SpaceDimension = 1, pass x"
else:
args = [np.atleast_1d(np.array(t, copy=False)) for t in args]
return do_findpoints(mesh, *args)
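# Usage sketch (an illustrative addition): locating the elements that contain a few query
# points on a simple structured mesh; the mesh construction here is an assumption made for
# demonstration and is not part of the original module.
def _example_findpoints_usage():  # hypothetical helper, not referenced elsewhere
    mesh = mfem.Mesh(10, 10, "TRIANGLE")  # unit-square triangular mesh (assumed constructor)
    x = np.array([0.1, 0.5, 0.9])
    y = np.array([0.2, 0.5, 0.8])
    num_found, elem_id, ips = findpoints(mesh, x, y)
    return num_found, elem_id, ips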
| {
"content_hash": "50691c358fd8302390997eec636e4bdb",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 85,
"avg_line_length": 29.14,
"alnum_prop": 0.5998627316403569,
"repo_name": "mfem/PyMFEM",
"id": "89e3bdcddb2846365fd29b71ccf048d90487891b",
"size": "1457",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mfem/common/findpoints.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "179682"
},
{
"name": "Grammatical Framework",
"bytes": "18800"
},
{
"name": "Makefile",
"bytes": "1055"
},
{
"name": "Python",
"bytes": "265160"
},
{
"name": "SWIG",
"bytes": "371435"
},
{
"name": "Shell",
"bytes": "1650"
}
],
"symlink_target": ""
} |
import os, urllib2, cStringIO
from dota2py import api
import Image
if __name__ == "__main__":
if not os.path.exists("../data/images"):
os.makedirs("../data/images")
os.chdir("../data/images")
scale = 50/1080.0
for hero in api.get_heroes()["result"]["heroes"]:
if "abyssal_underlord" in hero["name"]:
continue
#if not os.path.exists("{}.png".format(hero["localized_name"])):
print api.get_hero_image_url(hero["name"][len("npc_dota_hero_"):], "lg")
img = Image.open(cStringIO.StringIO(urllib2.urlopen(api.get_hero_image_url(hero["name"][len("npc_dota_hero_"):])).read()))
img = img.resize((89, 50), Image.ANTIALIAS)
img.save("{}.png".format(hero["localized_name"]))
| {
"content_hash": "33aa4ad086555f974fb7ee3534ee7f7d",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 127,
"avg_line_length": 35.85,
"alnum_prop": 0.6276150627615062,
"repo_name": "GuitaringEgg/CounterPyck",
"id": "a34207b8bccbde7393ddcbab420fed17194b7534",
"size": "717",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/downloadImages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27469"
}
],
"symlink_target": ""
} |
"""Referral Whois."""
| {
"content_hash": "d6e9a803f444d1dbc2e431ee27bad26d",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 21,
"avg_line_length": 22,
"alnum_prop": 0.5909090909090909,
"repo_name": "cloudify-cosmo/softlayer-python",
"id": "ef14d68803883cff0772ac45f71704c716910cbc",
"size": "22",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "SoftLayer/CLI/rwhois/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "123"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "Python",
"bytes": "771860"
}
],
"symlink_target": ""
} |
"""A class that performs HTTP-01 challenges for Nginx"""
import io
import logging
from typing import Any
from typing import List
from typing import Optional
from typing import TYPE_CHECKING
from certbot_nginx._internal import nginxparser
from certbot_nginx._internal.obj import Addr
from acme import challenges
from acme.challenges import KeyAuthorizationChallengeResponse
from certbot import errors
from certbot.achallenges import KeyAuthorizationAnnotatedChallenge
from certbot.compat import os
from certbot.plugins import common
if TYPE_CHECKING:
from certbot_nginx._internal.configurator import NginxConfigurator
logger = logging.getLogger(__name__)
class NginxHttp01(common.ChallengePerformer):
"""HTTP-01 authenticator for Nginx
:ivar configurator: NginxConfigurator object
:type configurator: :class:`~nginx.configurator.NginxConfigurator`
:ivar list achalls: Annotated
class:`~certbot.achallenges.KeyAuthorizationAnnotatedChallenge`
challenges
:param list indices: Meant to hold indices of challenges in a
larger array. NginxHttp01 is capable of solving many challenges
at once which causes an indexing issue within NginxConfigurator
who must return all responses in order. Imagine
NginxConfigurator maintaining state about where all of the
challenges, possibly of different types, belong in the response
array. This is an optional utility.
"""
def __init__(self, configurator: "NginxConfigurator") -> None:
super().__init__(configurator)
self.configurator: "NginxConfigurator"
self.challenge_conf = os.path.join(
configurator.config.config_dir, "le_http_01_cert_challenge.conf")
def perform(self) -> List[KeyAuthorizationChallengeResponse]:
"""Perform a challenge on Nginx.
:returns: list of :class:`acme.challenges.KeyAuthorizationChallengeResponse`
:rtype: list
"""
if not self.achalls:
return []
responses = [x.response(x.account_key) for x in self.achalls]
# Set up the configuration
self._mod_config()
# Save reversible changes
self.configurator.save("HTTP Challenge", True)
return responses
def _mod_config(self) -> None:
"""Modifies Nginx config to include server_names_hash_bucket_size directive
and server challenge blocks.
:raises .MisconfigurationError:
Unable to find a suitable HTTP block in which to include
authenticator hosts.
"""
included = False
include_directive = ['\n', 'include', ' ', self.challenge_conf]
root = self.configurator.parser.config_root
bucket_directive = ['\n', 'server_names_hash_bucket_size', ' ', '128']
main = self.configurator.parser.parsed[root]
# insert include directive
for line in main:
if line[0] == ['http']:
body = line[1]
if include_directive not in body:
body.insert(0, include_directive)
included = True
break
# insert or update the server_names_hash_bucket_size directive
# We have several options here.
# 1) Only check nginx.conf
# 2) Check included files, assuming they've been included inside http already,
# because if they added it outside an http block their config is broken anyway
# 3) Add metadata during parsing to note if an include happened inside the http block
#
# 1 causes bugs; see https://github.com/certbot/certbot/issues/5199
# 3 would require a more extensive rewrite and probably isn't necessary anyway
# So this code uses option 2.
found_bucket = False
for file_contents in self.configurator.parser.parsed.values():
body = file_contents # already inside http in an included file
for line in file_contents:
if line[0] == ['http']:
body = line[1] # enter http because this is nginx.conf
break
for posn, inner_line in enumerate(body):
if inner_line[0] == bucket_directive[1]:
if int(inner_line[1]) < int(bucket_directive[3]):
body[posn] = bucket_directive
found_bucket = True
break
if found_bucket:
break
if not found_bucket:
for line in main:
if line[0] == ['http']:
body = line[1]
body.insert(0, bucket_directive)
break
if not included:
raise errors.MisconfigurationError(
'Certbot could not find a block to include '
'challenges in %s.' % root)
config = [self._make_or_mod_server_block(achall) for achall in self.achalls]
config = [x for x in config if x is not None]
config = nginxparser.UnspacedList(config)
logger.debug("Generated server block:\n%s", str(config))
self.configurator.reverter.register_file_creation(
True, self.challenge_conf)
with io.open(self.challenge_conf, "w", encoding="utf-8") as new_conf:
nginxparser.dump(config, new_conf)
def _default_listen_addresses(self) -> List[Addr]:
"""Finds addresses for a challenge block to listen on.
:returns: list of :class:`certbot_nginx._internal.obj.Addr` to apply
:rtype: list
"""
addresses: List[Optional[Addr]] = []
default_addr = "%s" % self.configurator.config.http01_port
ipv6_addr = "[::]:{0}".format(
self.configurator.config.http01_port)
port = self.configurator.config.http01_port
ipv6, ipv6only = self.configurator.ipv6_info(str(port))
if ipv6:
# If IPv6 is active in Nginx configuration
if not ipv6only:
# If ipv6only=on is not already present in the config
ipv6_addr = ipv6_addr + " ipv6only=on"
addresses = [Addr.fromstring(default_addr),
Addr.fromstring(ipv6_addr)]
logger.debug(("Using default addresses %s and %s for authentication."),
default_addr,
ipv6_addr)
else:
addresses = [Addr.fromstring(default_addr)]
logger.debug("Using default address %s for authentication.",
default_addr)
return [address for address in addresses if address]
def _get_validation_path(self, achall: KeyAuthorizationAnnotatedChallenge) -> str:
return os.sep + os.path.join(challenges.HTTP01.URI_ROOT_PATH, achall.chall.encode("token"))
def _make_server_block(self, achall: KeyAuthorizationAnnotatedChallenge) -> List[Any]:
"""Creates a server block for a challenge.
:param achall: Annotated HTTP-01 challenge
:type achall: :class:`certbot.achallenges.KeyAuthorizationAnnotatedChallenge`
:returns: server block for the challenge host
:rtype: list
"""
addrs = self._default_listen_addresses()
block = [['listen', ' ', addr.to_string(include_default=False)] for addr in addrs]
# Ensure we 404 on any other request by setting a root
document_root = os.path.join(
self.configurator.config.work_dir, "http_01_nonexistent")
block.extend([['server_name', ' ', achall.domain],
['root', ' ', document_root],
self._location_directive_for_achall(achall)
])
# TODO: do we want to return something else if they otherwise access this block?
return [['server'], block]
def _location_directive_for_achall(self, achall: KeyAuthorizationAnnotatedChallenge
) -> List[Any]:
validation = achall.validation(achall.account_key)
validation_path = self._get_validation_path(achall)
location_directive = [['location', ' ', '=', ' ', validation_path],
[['default_type', ' ', 'text/plain'],
['return', ' ', '200', ' ', validation]]]
return location_directive
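    # The nested list above is nginxparser's representation of, roughly:
    #
    #   location = /.well-known/acme-challenge/<token> {
    #       default_type text/plain;
    #       return 200 <key-authorization>;
    #   }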
def _make_or_mod_server_block(self, achall: KeyAuthorizationAnnotatedChallenge
) -> Optional[List[Any]]:
"""Modifies server blocks to respond to a challenge. Returns a new HTTP server block
to add to the configuration if an existing one can't be found.
:param achall: Annotated HTTP-01 challenge
:type achall: :class:`certbot.achallenges.KeyAuthorizationAnnotatedChallenge`
:returns: new server block to be added, if any
:rtype: list
"""
http_vhosts, https_vhosts = self.configurator.choose_auth_vhosts(achall.domain)
new_vhost: Optional[List[Any]] = None
if not http_vhosts:
# Couldn't find either a matching name+port server block
# or a port+default_server block, so create a dummy block
new_vhost = self._make_server_block(achall)
# Modify any existing server blocks
for vhost in set(http_vhosts + https_vhosts):
location_directive = [self._location_directive_for_achall(achall)]
self.configurator.parser.add_server_directives(vhost, location_directive)
rewrite_directive = [['rewrite', ' ', '^(/.well-known/acme-challenge/.*)',
' ', '$1', ' ', 'break']]
self.configurator.parser.add_server_directives(
vhost, rewrite_directive, insert_at_top=True)
return new_vhost
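    # The rewrite added above renders roughly as
    # "rewrite ^(/.well-known/acme-challenge/.*) $1 break;", which keeps other
    # rewrite rules in the vhost from redirecting challenge requests.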
| {
"content_hash": "2041d9dff73cb3266b9c1d945d89e03c",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 99,
"avg_line_length": 40.358024691358025,
"alnum_prop": 0.6131334760885082,
"repo_name": "letsencrypt/letsencrypt",
"id": "9b086d42950faee79647b932007baaaa606462d4",
"size": "9807",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "certbot-nginx/certbot_nginx/_internal/http_01.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "50702"
},
{
"name": "Augeas",
"bytes": "5062"
},
{
"name": "Batchfile",
"bytes": "35037"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37309"
},
{
"name": "Nginx",
"bytes": "4274"
},
{
"name": "Python",
"bytes": "1336185"
},
{
"name": "Shell",
"bytes": "147823"
}
],
"symlink_target": ""
} |
import os
import re
from email.mime.base import MIMEBase
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.core.mail.message import SafeMIMEMultipart
from django.template.loader import select_template
from django.template import Context, RequestContext, TemplateDoesNotExist
class EmailMultiRelated(EmailMultiAlternatives):
"""
A version of EmailMessage that makes it easy to send multipart/related
messages. For example, including text and HTML versions with inline images.
"""
related_subtype = 'related'
def __init__(self, *args, **kwargs):
# self.related_ids = []
self.related_attachments = []
return super(EmailMultiRelated, self).__init__(*args, **kwargs)
def attach_related(self, filename=None, content=None, mimetype=None):
"""
Attaches a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass it is inserted directly
into the resulting message attachments.
"""
if isinstance(filename, MIMEBase):
            assert content is None and mimetype is None
self.related_attachments.append(filename)
else:
assert content is not None
self.related_attachments.append((filename, content, mimetype))
def attach_related_file(self, path, mimetype=None):
"""Attaches a file from the filesystem."""
filename = os.path.basename(path)
        with open(path, 'rb') as f:
            content = f.read()
self.attach_related(filename, content, mimetype)
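    # Example usage (a sketch; 'logo.png' and the path are placeholders):
    #   msg = EmailMultiRelated(subject, text_body, from_email, [to_addr])
    #   msg.attach_alternative('<p><img src="logo.png"></p>', 'text/html')
    #   msg.attach_related_file('/path/to/logo.png')
    #   msg.send()  # the HTML reference is rewritten to cid:logo.png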
def _create_message(self, msg):
return self._create_attachments(self._create_related_attachments(self._create_alternatives(msg)))
def _create_alternatives(self, msg):
for i, (content, mimetype) in enumerate(self.alternatives):
if mimetype == 'text/html':
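                # rewrite bare file references (e.g. "logo.png") to cid: URIs so
                # the HTML points at the related MIME parts; the negative
                # lookbehind avoids double-prefixing already-converted references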
for filename, _, _ in self.related_attachments:
content = re.sub(r'(?<!cid:)%s' % re.escape(filename), 'cid:%s' % filename, content)
self.alternatives[i] = (content, mimetype)
return super(EmailMultiRelated, self)._create_alternatives(msg)
def _create_related_attachments(self, msg):
encoding = self.encoding or settings.DEFAULT_CHARSET
if self.related_attachments:
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.related_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for related in self.related_attachments:
msg.attach(self._create_related_attachment(*related))
return msg
def _create_related_attachment(self, filename, content, mimetype=None):
"""
Convert the filename, content, mimetype triple into a MIME attachment
object. Adjust headers to use Content-ID where applicable.
Taken from http://code.djangoproject.com/ticket/4771
"""
attachment = super(EmailMultiRelated, self)._create_attachment(filename, content, mimetype)
if filename:
mimetype = attachment['Content-Type']
del(attachment['Content-Type'])
del(attachment['Content-Disposition'])
attachment.add_header('Content-Disposition', 'inline', filename=filename)
attachment.add_header('Content-Type', mimetype, name=filename)
attachment.add_header('Content-ID', '<%s>' % filename)
return attachment
class TemplatedEmail(EmailMultiAlternatives):
"""
A version of EmailMultiRelated, EmailMultiAlternatives, EmailMessage that makes it easy to send templated messages.
https://docs.djangoproject.com/en/dev/topics/email/
Extra parameters
- app_name (required)
- template_name (required)
- context
- request (for extra context)
Send email rendering text and html versions for the specified template name using the context dictionary passed in.
    Arguments are as per django's send_mail apart from template, which should be the common path and name of the text and html templates without the extension.
    For example, it will look for the templates in this order (default Django behaviour for retrieving templates):
    - /myproject/templates/"<app_name>/interaction/email/<template_name>/"
    - /myproject/templates/"interaction/email/<template_name>/"
    - /myproject/*<app_name>/templates/"<app_name>/interaction/email/<template_name>/"   # NOTE: *<app_name> for every installed app
    - /myproject/*<app_name>/templates/"interaction/email/<template_name>/"  # NOTE: *<app_name> for every installed app
"""
def __init__(self, app_name, template_name, subject='', body='', context=None, request=None, from_email=None, to=None, \
bcc=None, connection=None, attachments=None, headers=None, alternatives=None, premailer=False):
self.app_name = app_name
self.template_name = template_name
self.premailer = premailer
to = to if not to or hasattr(to, "__iter__") else [to]
bcc = bcc if not bcc or hasattr(bcc, "__iter__") else [bcc]
context = context or {}
self.context_instance = RequestContext(request, context) if request else Context(context)
subject = self.render_subject()
body = self.render_body('txt')
super(TemplatedEmail, self).__init__(subject, body, from_email, to, bcc, connection, attachments, headers, alternatives)
self.attach_body('html')
def render_body(self, type):
template_list = ['%s/interaction/email/%s/body.%s' % (self.app_name, self.template_name, type), 'interaction/email/%s/body.%s' % (self.template_name, type)]
template = select_template(template_list)
return template.render(self.context_instance)
def render_subject(self):
template_list = ['%s/interaction/email/%s/subject.txt' % (self.app_name, self.template_name), 'interaction/email/%s/subject.txt' % self.template_name]
template = select_template(template_list)
return template.render(self.context_instance).strip()
def render_premailer(self, html):
import requests
data = {
'html': html,
'link_query_string': '',
'remove_ids': False,
'remove_classes': False,
'remove_comments': False,
}
cleaned_html_url = requests.post('http://premailer.dialect.ca/api/0.1/documents', data).json().get('documents').get('html')
response = requests.get(cleaned_html_url)
return response.content
def attach_body(self, type):
try:
            # try loading the body for the requested type ('txt' or 'html')
            body = self.render_body(type)
            if type == 'html':
                content_type = 'text/html'
                if self.premailer:
                    body = self.render_premailer(body)
            else:
                content_type = 'text/plain'
self.attach_alternative(body, content_type)
except TemplateDoesNotExist:
pass
# The subject, message, from_email and recipient_list parameters are required.
#
# subject: A string.
# message: A string.
# from_email: A string.
# recipient_list: A list of strings, each an email address. Each member of recipient_list will see the other recipients in the "To:" field of the email message.
# fail_silently: A boolean. If it's False, send_mail will raise an smtplib.SMTPException. See the smtplib docs for a list of possible exceptions, all of which are subclasses of SMTPException.
# auth_user: The optional username to use to authenticate to the SMTP server. If this isn't provided, Django will use the value of the EMAIL_HOST_USER setting.
# auth_password: The optional password to use to authenticate to the SMTP server. If this isn't provided, Django will use the value of the EMAIL_HOST_PASSWORD setting.
# connection: The optional email backend to use to send the mail. If unspecified, an instance of the default backend will be used. See the documentation on Email backends for more details.
def send_mail_with_template(app_name, template_name, receipents, address_from=getattr(settings, 'DEFAULT_FROM_EMAIL', None), context=None, request=None, attachments=None, fail_silently=False):
"""
Send email rendering text and html versions for the specified template name using the context dictionary passed in.
    Arguments are as per django's send_mail apart from template, which should be the common path and name of the text and html templates without the extension.
    For example, it will look for the templates in this order:
Default django behaviour:
- /myproject/templates/"email/<app_name>/<template_name>/"
- /myproject/templates/"email/<template_name>/"
- /myproject/*<app_name>/templates/"email/<app_name>/<template_name>"/ # NOTE: *<app_name> for every installed app
- /myproject/*<app_name>/templates/"email/<template_name>"/ # NOTE: *<app_name> for every installed app
"""
if context is None:
context = {}
if attachments is None:
attachments = []
if request:
context_instance = RequestContext(request, context)
else:
context_instance = Context(context)
# allow for a single address to be passed in
if not hasattr(receipents, "__iter__"):
receipents = [receipents]
# loads a template passing in vars as context
render_body = lambda type: select_template(['email/%s/%s/body.%s' % (app_name, template_name, type), 'email/%s/body.%s' % (template_name, type)]).render(context_instance)
    # render the email's subject
template_list = ['email/%s/%s/subject.txt' % (app_name, template_name), 'email/%s/subject.txt' % template_name]
subject = select_template(template_list).render(context_instance).strip()
# create email
email = EmailMultiAlternatives(subject, render_body('txt'), address_from, receipents)
try:
# try loading the html body
email.attach_alternative(render_body('html'), "text/html")
except TemplateDoesNotExist:
pass
    # attach any attachments (TODO: also support email.attach_file?)
for attachment in attachments:
#raise Exception(attachment)
email.attach(*attachment)
# send email
    email.send(fail_silently=fail_silently)
| {
"content_hash": "3b55ca1c94ce2473a7441531fa417c62",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 192,
"avg_line_length": 48.036866359447004,
"alnum_prop": 0.6534919416730621,
"repo_name": "baskoopmans/djcommon",
"id": "1323e0ad2611f12744bdf3acb79cba30fe8b169a",
"size": "10443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djcommon/email.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "220"
},
{
"name": "JavaScript",
"bytes": "3564"
},
{
"name": "Python",
"bytes": "95600"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import time
from django.core import signing
from django.http import HttpRequest, HttpResponse
from django.test import TestCase, override_settings
class SignedCookieTest(TestCase):
def test_can_set_and_read_signed_cookies(self):
response = HttpResponse()
response.set_signed_cookie('c', 'hello')
self.assertIn('c', response.cookies)
self.assertTrue(response.cookies['c'].value.startswith('hello:'))
request = HttpRequest()
request.COOKIES['c'] = response.cookies['c'].value
value = request.get_signed_cookie('c')
self.assertEqual(value, 'hello')
def test_can_use_salt(self):
response = HttpResponse()
response.set_signed_cookie('a', 'hello', salt='one')
request = HttpRequest()
request.COOKIES['a'] = response.cookies['a'].value
value = request.get_signed_cookie('a', salt='one')
self.assertEqual(value, 'hello')
self.assertRaises(signing.BadSignature,
request.get_signed_cookie, 'a', salt='two')
def test_detects_tampering(self):
response = HttpResponse()
response.set_signed_cookie('c', 'hello')
request = HttpRequest()
request.COOKIES['c'] = response.cookies['c'].value[:-2] + '$$'
self.assertRaises(signing.BadSignature,
request.get_signed_cookie, 'c')
def test_default_argument_suppresses_exceptions(self):
response = HttpResponse()
response.set_signed_cookie('c', 'hello')
request = HttpRequest()
request.COOKIES['c'] = response.cookies['c'].value[:-2] + '$$'
self.assertEqual(request.get_signed_cookie('c', default=None), None)
def test_max_age_argument(self):
value = 'hello'
_time = time.time
time.time = lambda: 123456789
try:
response = HttpResponse()
response.set_signed_cookie('c', value)
request = HttpRequest()
request.COOKIES['c'] = response.cookies['c'].value
self.assertEqual(request.get_signed_cookie('c'), value)
time.time = lambda: 123456800
self.assertEqual(request.get_signed_cookie('c', max_age=12), value)
self.assertEqual(request.get_signed_cookie('c', max_age=11), value)
self.assertRaises(signing.SignatureExpired,
request.get_signed_cookie, 'c', max_age=10)
finally:
time.time = _time
@override_settings(SECRET_KEY=b'\xe7')
def test_signed_cookies_with_binary_key(self):
response = HttpResponse()
response.set_signed_cookie('c', 'hello')
request = HttpRequest()
request.COOKIES['c'] = response.cookies['c'].value
self.assertEqual(request.get_signed_cookie('c'), 'hello')
| {
"content_hash": "2cbd0f98ea99784b48346d90f7161609",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 79,
"avg_line_length": 38.61643835616438,
"alnum_prop": 0.6200780418588152,
"repo_name": "helenst/django",
"id": "ff8fe76bf6ad9f02e52d9a2b88e39cc063363ab2",
"size": "2819",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "tests/signed_cookies_tests/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53429"
},
{
"name": "JavaScript",
"bytes": "103687"
},
{
"name": "Makefile",
"bytes": "5553"
},
{
"name": "Python",
"bytes": "10130188"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
} |
from onnxruntime.capi._pybind_state import get_model_after_loading_checkpoint as _internal_load_checkpoint_to_model
from onnxruntime.capi._pybind_state import save_checkpoint as _internal_save_checkpoint
def save_checkpoint(parameters, path_to_checkpoint):
"""Saves the parameters to the checkpoint directory path_to_checkpoint."""
if parameters is None:
raise RuntimeError("No checkpoint parameters provided.")
# TODO: use Parameter class to pass information to backend
# Serialize the parameters and save the checkpoint
trainable_params, non_trainable_params = parameters
trainable_params = [param.SerializeToString() for param in trainable_params]
non_trainable_params = [param.SerializeToString() for param in non_trainable_params]
_internal_save_checkpoint(trainable_params, non_trainable_params, path_to_checkpoint)
def load_checkpoint_to_model(path_to_checkpoint, model):
"""Loads the checkpoint to an onnx inference model."""
model.ParseFromString(_internal_load_checkpoint_to_model(path_to_checkpoint, model.SerializeToString()))
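# Example usage (a sketch; names are placeholders and `parameters` is the
# (trainable, non_trainable) tuple of onnx parameter protos produced elsewhere
# by onnxblock):
#   save_checkpoint(parameters, "my_checkpoint")
#   load_checkpoint_to_model("my_checkpoint", inference_model)  # modifies model in place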
| {
"content_hash": "ba710719be5a27b1f1bde3dff4eb1a08",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 115,
"avg_line_length": 49.72727272727273,
"alnum_prop": 0.7696526508226691,
"repo_name": "microsoft/onnxruntime",
"id": "b94d7dd052c185fc84abd9f3502f9f9e00418970",
"size": "1211",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "orttraining/orttraining/python/training/onnxblock/checkpoint_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1763425"
},
{
"name": "Batchfile",
"bytes": "17040"
},
{
"name": "C",
"bytes": "955390"
},
{
"name": "C#",
"bytes": "2304597"
},
{
"name": "C++",
"bytes": "39435305"
},
{
"name": "CMake",
"bytes": "514764"
},
{
"name": "CSS",
"bytes": "138431"
},
{
"name": "Cuda",
"bytes": "1104338"
},
{
"name": "Dockerfile",
"bytes": "8089"
},
{
"name": "HLSL",
"bytes": "11234"
},
{
"name": "HTML",
"bytes": "5933"
},
{
"name": "Java",
"bytes": "418665"
},
{
"name": "JavaScript",
"bytes": "212575"
},
{
"name": "Jupyter Notebook",
"bytes": "218327"
},
{
"name": "Kotlin",
"bytes": "4653"
},
{
"name": "Liquid",
"bytes": "5457"
},
{
"name": "NASL",
"bytes": "2628"
},
{
"name": "Objective-C",
"bytes": "151027"
},
{
"name": "Objective-C++",
"bytes": "107084"
},
{
"name": "Pascal",
"bytes": "9597"
},
{
"name": "PowerShell",
"bytes": "16419"
},
{
"name": "Python",
"bytes": "5041661"
},
{
"name": "Roff",
"bytes": "27539"
},
{
"name": "Ruby",
"bytes": "3545"
},
{
"name": "Shell",
"bytes": "116513"
},
{
"name": "Swift",
"bytes": "115"
},
{
"name": "TypeScript",
"bytes": "973087"
}
],
"symlink_target": ""
} |
import os
import time
from config import MEM_SAVE_PATH, FREE_SIZE, SAVE_PATH, LAST_FILE, MAX_LOCK_WAIT, LOCK
__author__ = 'ihciah'
def clean_env():
to_delete = ['/dev/shm/mmssms.db', '/dev/shm/mmssms.db-journal',
'/tmp/mmssms.db', '/tmp/mmssms.db-journal']
for f in to_delete:
if os.path.exists(f):
os.remove(f)
def get_db_save_path():
statvfs = os.statvfs('/dev/shm')
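    # f_frsize * f_bavail = bytes available to unprivileged users; convert to
    # MiB and keep the db in tmpfs only while more than FREE_SIZE MiB remain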
free = statvfs.f_frsize * statvfs.f_bavail / 1024 / 1024
return MEM_SAVE_PATH if free > FREE_SIZE else SAVE_PATH
class LastFile:
def __init__(self):
self.last_time = self.get_last_time_on_disk()
def get_last_time_on_disk(self):
if os.path.isfile(LAST_FILE):
with open(LAST_FILE) as f:
last = f.read().strip()
if last.isdigit() and len(last) > 9:
return int(last)
last = int(time.time() * 1000)
self.dump_to_disk(last)
return last
def get_last_time(self):
return self.last_time
def dump_to_disk(self, t):
with open(LAST_FILE, "w") as fw:
fw.write(str(t))
def update_time(self, t):
if self.last_time >= t:
return
self.last_time = t
self.dump_to_disk(t)
class FileLock:
@staticmethod
def wait_lock():
wait_time = 0.0
while True:
if wait_time > MAX_LOCK_WAIT:
FileLock.delete_lock()
if os.path.isfile(LOCK):
time.sleep(0.5)
wait_time += 0.5
else:
break
@staticmethod
def create_lock():
open(LOCK, 'a').close()
@staticmethod
def delete_lock():
        try:
            os.remove(LOCK)
        except OSError:
            # the lock file may already be gone; ignore
            pass
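# Typical usage (a sketch; LOCK and MAX_LOCK_WAIT come from config):
#   FileLock.wait_lock()       # block until any existing lock goes away
#   FileLock.create_lock()
#   try:
#       ...                    # work on the shared sms database
#   finally:
#       FileLock.delete_lock()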
| {
"content_hash": "3a516ea2962ffb3517e5762771a0144c",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 86,
"avg_line_length": 24.438356164383563,
"alnum_prop": 0.5285874439461884,
"repo_name": "ihciah/AndroidSMSRelay",
"id": "b717c3a71f98552d314d2af1eda0c76ce52df2cf",
"size": "1809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/misc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14381"
},
{
"name": "Shell",
"bytes": "235"
}
],
"symlink_target": ""
} |
def filter_list(lst):
return filter(lambda a: isinstance(a, int), lst)
| {
"content_hash": "05f20b68776c1fa80697898466303b9e",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 52,
"avg_line_length": 37.5,
"alnum_prop": 0.6933333333333334,
"repo_name": "the-zebulan/CodeWars",
"id": "efa6f7c7e04119f8c3d18358c963c74d7bf8a063",
"size": "75",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "katas/kyu_7/filter_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1203000"
}
],
"symlink_target": ""
} |
import unittest
import config
import mle
import thread_cert
from pktverify.consts import MLE_CHILD_ID_RESPONSE, ADDR_QRY_URI, ADDR_NTF_URI, NL_TARGET_EID_TLV
from pktverify.packet_verifier import PacketVerifier
LEADER = 1
ROUTER = 2
MED = 3
SED = 4
MTDS = [MED, SED]
# Test Purpose and Description:
# -----------------------------
# The purpose of the test case is to verify that when the timer reaches
# the value of the Timeout TLV sent by the Child, the Parent stops
# responding to Address Query on the Child's behalf
#
# Test Topology:
# --------------
# Leader
# |
# Router
# / \
# MED SED
#
# DUT Types:
# ----------
# Router
class Cert_5_1_02_ChildAddressTimeout(thread_cert.TestCase):
USE_MESSAGE_FACTORY = False
TOPOLOGY = {
LEADER: {
'name': 'LEADER',
'mode': 'rdn',
'allowlist': [ROUTER]
},
ROUTER: {
'name': 'ROUTER',
'mode': 'rdn',
'allowlist': [LEADER, MED, SED]
},
MED: {
'name': 'MED',
'is_mtd': True,
'mode': 'rn',
'timeout': config.DEFAULT_CHILD_TIMEOUT,
'allowlist': [ROUTER]
},
SED: {
'name': 'SED',
'is_mtd': True,
'mode': 'n',
'timeout': config.DEFAULT_CHILD_TIMEOUT,
'allowlist': [ROUTER]
},
}
def test(self):
self.nodes[LEADER].start()
self.simulator.go(config.LEADER_STARTUP_DELAY)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ROUTER].start()
self.simulator.go(config.ROUTER_STARTUP_DELAY)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
self.nodes[MED].start()
self.simulator.go(5)
self.assertEqual(self.nodes[MED].get_state(), 'child')
self.nodes[SED].start()
self.simulator.go(5)
self.assertEqual(self.nodes[SED].get_state(), 'child')
self.collect_ipaddrs()
med_mleid = self.nodes[MED].get_ip6_address(config.ADDRESS_TYPE.ML_EID)
sed_mleid = self.nodes[SED].get_ip6_address(config.ADDRESS_TYPE.ML_EID)
self.nodes[MED].stop()
self.nodes[SED].stop()
self.simulator.go(config.DEFAULT_CHILD_TIMEOUT + 5)
self.assertFalse(self.nodes[LEADER].ping(med_mleid))
self.assertFalse(self.nodes[LEADER].ping(sed_mleid))
def verify(self, pv):
pkts = pv.pkts
pv.summary.show()
LEADER = pv.vars['LEADER']
ROUTER = pv.vars['ROUTER']
MED = pv.vars['MED']
MED_MLEID = pv.vars['MED_MLEID']
SED = pv.vars['SED']
SED_MLEID = pv.vars['SED_MLEID']
MM = pv.vars['MM_PORT']
# Step 1: Verify topology is formed correctly
pv.verify_attached('ROUTER')
pkts.filter_wpan_src64(ROUTER).\
filter_wpan_dst64(MED).\
filter_mle_cmd(MLE_CHILD_ID_RESPONSE).\
must_next()
pkts.filter_wpan_src64(ROUTER).\
filter_wpan_dst64(SED).\
filter_mle_cmd(MLE_CHILD_ID_RESPONSE).\
must_next()
# Step 2: Power off both devices and allow for the keep-alive timeout to expire
# Step 3: The Leader sends an ICMPv6 Echo Request to MED and attempts to perform
# address resolution by sending an Address Query Request
pkts.filter_wpan_src64(LEADER).\
filter_RLARMA().\
filter_coap_request(ADDR_QRY_URI, port=MM).\
filter(lambda p: p.thread_address.tlv.type == [NL_TARGET_EID_TLV] and\
p.thread_address.tlv.target_eid == MED_MLEID).\
must_next()
# Step 4: Router MUST NOT respond with an Address Notification Message
pkts.filter_wpan_src64(ROUTER).\
filter_coap_request(ADDR_NTF_URI).\
must_not_next()
# Step 5: The Leader sends an ICMPv6 Echo Request to SED and attempts to perform
# address resolution by sending an Address Query Request
pkts.filter_wpan_src64(LEADER).\
filter_RLARMA().\
filter(lambda p: p.thread_address.tlv.type == [NL_TARGET_EID_TLV] and\
p.thread_address.tlv.target_eid == SED_MLEID).\
filter_coap_request(ADDR_QRY_URI, port=MM).\
must_next()
# Step 6: Router MUST NOT respond with an Address Notification Message
pkts.filter_wpan_src64(ROUTER).\
filter_coap_request(ADDR_NTF_URI).\
must_not_next()
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "269137e49af161b142a102ff25a8c8dc",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 97,
"avg_line_length": 29.935064935064936,
"alnum_prop": 0.5689804772234274,
"repo_name": "srickardti/openthread",
"id": "abb9e8cdd4a01e28ec34c3cea2dc782cf57d4bc6",
"size": "6215",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "tests/scripts/thread-cert/Cert_5_1_02_ChildAddressTimeout.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2610"
},
{
"name": "C",
"bytes": "1586867"
},
{
"name": "C++",
"bytes": "8331824"
},
{
"name": "CMake",
"bytes": "109816"
},
{
"name": "Dockerfile",
"bytes": "10410"
},
{
"name": "M4",
"bytes": "32369"
},
{
"name": "Makefile",
"bytes": "192208"
},
{
"name": "Python",
"bytes": "4622817"
},
{
"name": "Shell",
"bytes": "165383"
}
],
"symlink_target": ""
} |
from block import *
import time
from logging import ERROR, WARN, INFO, DEBUG
class counter(Block):
def do_task(self):
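        # emit 20 log records, one per second, each carrying the current count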
for i in range(20):
log = Log()
log.log["value"] = [self.count]
self.log(INFO, "Sending " + str(self.count))
self.push("output", log)
self.count = self.count + 1
time.sleep(1)
yield
def on_load(self, config):
self.count = 0
self.add_port("output", Port.PUSH, Port.UNNAMED, ["value"])
self.log(INFO, "Counter-Src block loaded") | {
"content_hash": "ddfdb5755a237e4343e7dd424d38e8ab",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 63,
"avg_line_length": 26.736842105263158,
"alnum_prop": 0.6141732283464567,
"repo_name": "mpi-sws-rse/datablox",
"id": "19f82a1a8045869f4cbe82024fbbd7f41cbfe959",
"size": "508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blox/counter__1_0/b_counter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7801"
},
{
"name": "Python",
"bytes": "378676"
},
{
"name": "Shell",
"bytes": "7299"
}
],
"symlink_target": ""
} |
from parglare import Grammar, GLRParser
def test_regex_alternative_match_bug():
"""
"""
grammar = """
A: "Begin" Eq "End";
terminals
Eq: /=|EQ/;
"""
g = Grammar.from_string(grammar)
parser = GLRParser(g)
parser.parse('Begin EQ End')
| {
"content_hash": "8dbf3422c1d9195dbff6015e1345ca69",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 39,
"avg_line_length": 17.3125,
"alnum_prop": 0.5703971119133574,
"repo_name": "igordejanovic/parglare",
"id": "99612783b16e93771ec3c1c970319d9ef909339e",
"size": "277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/func/regressions/test_regex_alternative_match_bug.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2131"
},
{
"name": "Python",
"bytes": "402620"
},
{
"name": "Shell",
"bytes": "684"
}
],
"symlink_target": ""
} |
'''
:codeauthor: :email:`Pedro Algarvio ([email protected])`
tests.unit.utils.warnings_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Test ``salt.utils.warn_until`` and ``salt.utils.kwargs_warn_until``
'''
# Import python libs
from __future__ import absolute_import
import sys
import warnings
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
from salt.utils import warn_until, kwargs_warn_until
from salt.version import SaltStackVersion
class WarnUntilTestCase(TestCase):
def test_warn_until_warning_raised(self):
# We *always* want *all* warnings thrown on this module
warnings.filterwarnings('always', '', DeprecationWarning, __name__)
def raise_warning(_version_info_=(0, 16, 0)):
warn_until(
(0, 17), 'Deprecation Message!',
_version_info_=_version_info_
)
def raise_named_version_warning(_version_info_=(0, 16, 0)):
warn_until(
'Hydrogen', 'Deprecation Message!',
_version_info_=_version_info_
)
# raise_warning should show warning until version info is >= (0, 17)
with warnings.catch_warnings(record=True) as recorded_warnings:
raise_warning()
self.assertEqual(
'Deprecation Message!', str(recorded_warnings[0].message)
)
# raise_warning should show warning until version info is >= (0, 17)
with warnings.catch_warnings(record=True) as recorded_warnings:
raise_named_version_warning()
self.assertEqual(
'Deprecation Message!', str(recorded_warnings[0].message)
)
# the deprecation warning is not issued because we passed
        # _dont_call_warnings
with warnings.catch_warnings(record=True) as recorded_warnings:
warn_until(
(0, 17), 'Foo', _dont_call_warnings=True,
_version_info_=(0, 16)
)
self.assertEqual(0, len(recorded_warnings))
# Let's set version info to (0, 17), a RuntimeError should be raised
with self.assertRaisesRegexp(
RuntimeError,
r'The warning triggered on filename \'(.*)warnings_test.py\', '
r'line number ([\d]+), is supposed to be shown until version '
r'0.17.0 is released. Current version is now 0.17.0. '
r'Please remove the warning.'):
raise_warning(_version_info_=(0, 17, 0))
# Let's set version info to (0, 17), a RuntimeError should be raised
with self.assertRaisesRegexp(
RuntimeError,
r'The warning triggered on filename \'(.*)warnings_test.py\', '
r'line number ([\d]+), is supposed to be shown until version '
r'(.*) is released. Current version is now '
r'([\d.]+). Please remove the warning.'):
raise_named_version_warning(_version_info_=(sys.maxint, 16, 0))
# Even though we're calling warn_until, we pass _dont_call_warnings
# because we're only after the RuntimeError
with self.assertRaisesRegexp(
RuntimeError,
r'The warning triggered on filename \'(.*)warnings_test.py\', '
r'line number ([\d]+), is supposed to be shown until version '
r'0.17.0 is released. Current version is now '
r'(.*). Please remove the warning.'):
warn_until(
(0, 17), 'Foo', _dont_call_warnings=True
)
with self.assertRaisesRegexp(
RuntimeError,
r'The warning triggered on filename \'(.*)warnings_test.py\', '
r'line number ([\d]+), is supposed to be shown until version '
r'(.*) is released. Current version is now '
r'(.*). Please remove the warning.'):
warn_until(
'Hydrogen', 'Foo', _dont_call_warnings=True,
_version_info_=(sys.maxint, 16, 0)
)
# version on the deprecation message gets properly formatted
with warnings.catch_warnings(record=True) as recorded_warnings:
vrs = SaltStackVersion.from_name('Helium')
warn_until(
'Helium', 'Deprecation Message until {version}!',
_version_info_=(vrs.major - 1, 0)
)
self.assertEqual(
'Deprecation Message until {0}!'.format(vrs.formatted_version),
str(recorded_warnings[0].message)
)
def test_kwargs_warn_until_warning_raised(self):
# We *always* want *all* warnings thrown on this module
warnings.filterwarnings('always', '', DeprecationWarning, __name__)
def raise_warning(**kwargs):
_version_info_ = kwargs.pop('_version_info_', (0, 16, 0))
kwargs_warn_until(
kwargs,
(0, 17),
_version_info_=_version_info_
)
# raise_warning({...}) should show warning until version info is >= (0, 17)
with warnings.catch_warnings(record=True) as recorded_warnings:
raise_warning(foo=42) # with a kwarg
self.assertEqual(
'The following parameter(s) have been deprecated and '
'will be removed in \'0.17.0\': \'foo\'.',
str(recorded_warnings[0].message)
)
# With no **kwargs, should not show warning until version info is >= (0, 17)
with warnings.catch_warnings(record=True) as recorded_warnings:
kwargs_warn_until(
{}, # no kwargs
(0, 17),
_version_info_=(0, 16, 0)
)
self.assertEqual(0, len(recorded_warnings))
# Let's set version info to (0, 17), a RuntimeError should be raised
# regardless of whether or not we pass any **kwargs.
with self.assertRaisesRegexp(
RuntimeError,
r'The warning triggered on filename \'(.*)warnings_test.py\', '
r'line number ([\d]+), is supposed to be shown until version '
r'0.17.0 is released. Current version is now 0.17.0. '
r'Please remove the warning.'):
raise_warning(_version_info_=(0, 17)) # no kwargs
with self.assertRaisesRegexp(
RuntimeError,
r'The warning triggered on filename \'(.*)warnings_test.py\', '
r'line number ([\d]+), is supposed to be shown until version '
r'0.17.0 is released. Current version is now 0.17.0. '
r'Please remove the warning.'):
raise_warning(bar='baz', qux='quux', _version_info_=(0, 17)) # some kwargs
if __name__ == '__main__':
from integration import run_tests
run_tests(WarnUntilTestCase, needs_daemon=False)
| {
"content_hash": "3ab7627a725b331b9d97c33413636911",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 87,
"avg_line_length": 41.146198830409354,
"alnum_prop": 0.5596930073905628,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "2fd94e17ccf3a2b27f918ad4a2034cd90bdbf48b",
"size": "7060",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.2/tests/unit/utils/warnings_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
} |