repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
charanpald/APGL | apgl/__init__.py | 1 | 2494 |
from apgl.version import __version__
def checkImport(name):
"""
Check whether a module with the given name can be found (via imp.find_module), without actually importing it.
"""
try:
import imp
imp.find_module(name)
return True
except ImportError as error:
return False
def getPythonVersion():
"""
Get the python version number as a floating point value.
"""
import sys
version = sys.version_info
version = version[0] + version[1]/10.0 + version[2]/100.0
return version
def skip(reason):
"""
A decorator for test skipping.
"""
version = getPythonVersion()
if version >= 2.7:
import unittest
return unittest.skip(reason)
else:
import unittest2
return unittest2.skip(reason)
def skipIf(condition, reason):
"""
A decorator for test skipping.
"""
version = getPythonVersion()
if version >= 2.7:
import unittest
return unittest.skipIf(condition, reason)
else:
import unittest2
return unittest2.skipIf(condition, reason)
def test():
"""
Find all tests within apgl (files matching "*Test.py") using the unittest library and
run them. In Python 2.7 and above the built-in unittest framework is used; for
Python 2.3-2.6 the unittest2 backport is required.
"""
try:
import traceback
import sys
import os
import logging
from apgl.util.PathDefaults import PathDefaults
logging.disable(logging.WARNING)
#logging.disable(logging.INFO)
sourceDir = PathDefaults.getSourceDir()
print("Running tests from " + sourceDir)
version = getPythonVersion()
if version >= 2.7:
import unittest
else:
import unittest2 as unittest
overallTestSuite = unittest.TestSuite()
overallTestSuite.addTest(unittest.defaultTestLoader.discover(os.path.join(sourceDir, "generator"), pattern='*Test.py', top_level_dir=sourceDir))
overallTestSuite.addTest(unittest.defaultTestLoader.discover(os.path.join(sourceDir, "graph"), pattern='*Test.py', top_level_dir=sourceDir))
overallTestSuite.addTest(unittest.defaultTestLoader.discover(os.path.join(sourceDir, "util"), pattern='*Test.py', top_level_dir=sourceDir))
unittest.TextTestRunner(verbosity=1).run(overallTestSuite)
except ImportError as error:
traceback.print_exc(file=sys.stdout)
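# Hypothetical usage sketch (not part of the original module): a minimal,
# hedged example of exercising the helpers above when this file is run
# directly. It only uses the functions defined in this module; "numpy" is
# just an arbitrary module name to probe for.
if __name__ == "__main__":
    print("Python version number: " + str(getPythonVersion()))
    print("numpy available: " + str(checkImport("numpy")))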
| bsd-3-clause | 5,971,958,496,822,752,000 | 28.341176 | 152 | 0.64595 | false |
Lavande/wx-post-generator | wxfancypic.py | 1 | 2823 | from flask import Flask,request,render_template,url_for,send_file
from werkzeug.contrib.fixers import ProxyFix
import hashlib
import xmltodict
import subprocess
import time
##BASIC SETTINGS##
#set the server's url here
urlbase = 'http://'
#set your token here
token = ''
#currently 2 options: xmas and lomolive (as in xmas.py and lomolive.py)
gen_mode = 'xmas'
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
if gen_mode == 'xmas':
import xmas
ok_msg = xmas.ok_msg
auto_reply = xmas.auto_reply
elif gen_mode == 'lomolive':
import lomolive
ok_msg = lomolive.ok_msg
auto_reply = lomolive.auto_reply
def Isfromwx(request):
signature = request.args.get('signature', '')
timestamp = request.args.get('timestamp', '')
nonce = request.args.get('nonce', '')
#echostr = request.args.get('echostr', '')
L = [token, timestamp, nonce]
L.sort()
s = L[0] + L[1] + L[2]
s = s.encode('utf8')
if hashlib.sha1(s).hexdigest() == signature:
return True
else:
return False
def xml_msg(user, msg, fromuser):
'''format the raw message into xml'''
template = '''
<xml>
<ToUserName><![CDATA[{0}]]></ToUserName>
<FromUserName><![CDATA[{3}]]></FromUserName>
<CreateTime>{1}</CreateTime>
<MsgType><![CDATA[text]]></MsgType>
<Content><![CDATA[{2}]]></Content>
</xml>'''
return template.format(user, int(time.time()), msg, fromuser)
@app.route('/', methods=['POST', 'GET'])
def get_msg():
if Isfromwx(request):
if request.method == 'POST':
d = request.data
d = str(d, 'utf-8')
d = xmltodict.parse(d)
FromUserName = d['xml']['FromUserName']
MsgType = d['xml']['MsgType']
me = d['xml']['ToUserName']
if MsgType == 'image':
MediaId = d['xml']['MediaId']
PicUrl = d['xml']['PicUrl']
subprocess.call(['wget', PicUrl, '-O', 'media/' + MediaId])
if gen_mode == 'xmas':
xmas.gen_pic(MediaId)
elif gen_mode == 'lomolive':
lomolive.gen_pic(MediaId)
result_url = urlbase + url_for('pic', MediaId=MediaId)
msg = ok_msg + result_url
xml = xml_msg(FromUserName, msg, me)
print(xml)
return xml
else:
#save user's text msg into a file as a backup
if MsgType == 'text':
with open('user_msg', 'a') as f: f.write(str(d)+'\n')
#default auto-reply if we received a non-image message
msg = auto_reply
xml = xml_msg(FromUserName, msg, me)
return xml
#show a blank page for website visitors
if request.method == 'GET':
return 'nothing'
@app.route('/pic/<MediaId>')
def pic(MediaId):
'''generates the web page that contains the picture'''
mediaurl = url_for('media', filename=MediaId+'.jpg')
return render_template('pic.html', pic_path=mediaurl)
@app.route('/media/<filename>')
def media(filename):
'''returns the media file (i.e., the picture)'''
path = 'media/' + filename
return send_file(path)
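# Hypothetical usage sketch (not part of the original file): rendering the
# reply XML for a made-up user. The OpenIDs below are assumptions for
# illustration; in production the app is served via WSGI and driven by
# WeChat callbacks.
if __name__ == '__main__':
    sample = xml_msg('fake-openid', 'hello from the server', 'gh_fake_account')
    print(sample)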
| mit | -1,949,566,384,466,096,600 | 23.336207 | 71 | 0.647892 | false |
TurpIF/gestures-learning | learner_finger.py | 1 | 8056 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
this_dir = os.path.dirname(os.path.realpath(__name__))
sys.path.insert(0, os.path.join(this_dir, 'libs', 'leap'))
import Leap
import time
import sys
class FingerPoint(object):
"""
All useful information about a finger.
Arguments:
position -- (Real, Real, Real) : Position of the finger
velocity -- (Real, Real, Real) : Velocity of the finger
direction -- (Real, Real, Real) : Direction vector of the finger
"""
def __init__(self, position, velocity, direction):
super(FingerPoint, self).__init__()
self.position = position
self.velocity = velocity
self.direction = direction
def __str__(self):
"""
String representation of the point.
Returns:
String representation
"""
info = list(self.position)
info += list(self.velocity)
info += list(self.direction)
# return '%f %f %f' % tuple(self.position)
return '%f %f %f %f %f %f %f %f %f' % tuple(info)
def to_tuple(self):
"""
Flatten the point into a single tuple corresponding to (posx, posy, posz,
velx, vely, velz, dirx, diry, dirz).
Returns:
A tuple representation of the point.
"""
ls = [self.position, self.velocity, self.direction]
# ls = [self.position]
return tuple(sum(map(list, ls), []))
@classmethod
def from_words(cls, words):
"""
Construct a *FingerPoint* from a list of words *words*. The read words
are consumed from the list.
Arguments:
words -- List of words to read in
Returns:
Constructed *FingerPoint*
Raises:
ValueError : if there are not enough words to read or if the read words
are not real numbers.
"""
if len(words) < 9:
raise ValueError('Not enough words to read a FingerPoint.')
X = [words.pop(0) for _ in xrange(9)]
try:
X = map(float, X)
except ValueError as e:
raise e
position = X[0], X[1], X[2]
# velocity = None
# direction = None
velocity = X[3], X[4], X[5]
direction = X[6], X[7], X[8]
return cls(position, velocity, direction)
class FingerMove(object):
"""
Basic structure representing a move of a finger.
Arguments:
name -- String : Name of the move
data -- [FingerPoint] : Temporal list of finger points
"""
def __init__(self, name, data):
assert len(data) >= 1
super(FingerMove, self).__init__()
self.name = name
self.data = data
def __str__(self):
"""
String representation of the move.
Returns:
String representation
"""
pos_str = map(str, self.data)
return '%s %s' % (self.name, ' '.join(pos_str))
def to_tuple(self):
"""
Flatten the move into a single tuple.
Returns:
A tuple representation of the move.
"""
d = sum(map(lambda x: list(x.to_tuple()), self.data), [])
return tuple([self.name] + d)
@classmethod
def from_string(cls, string):
"""
Construct a *FingerMove* from a string.
Arguments:
string -- Input string to transform into a Move
Returns:
The constructed move
Raises:
ValueError : When the string format is not good
"""
words = string.split(' ')
if len(words) < 2:
raise ValueError('A move has to contain at least a name.')
name = words.pop(0)
data = []
while words:
try:
data.append(FingerPoint.from_words(words))
except ValueError as e:
raise e
return cls(name, data)
def save(self, file=sys.stdout):
"""
Write the move to the file *file*
Arguments:
file -- File : File to write in
Raises:
IOError : When it's impossible to write into the file
"""
try:
file.write(str(self) + '\n')
except IOError:
raise
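def _example_round_trip():
    """Hypothetical usage sketch (not part of the original file): build a
    FingerMove from its serialized string form and print it back out. The
    'swipe' name and the zero-valued point are made up for illustration."""
    point_words = " ".join(["0.0"] * 9)
    move = FingerMove.from_string("swipe " + point_words)
    print str(move)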
def acquire_move(ctrl, size, time_sleep=0.005):
"""
Get a finger move made of *size* points.
Arguments:
ctrl -- Leap.Controller : Controller to use
size -- Integer : The number of position taken for the move
time_sleep -- Real : Time to sleep between taking the positions (default
0.005)
Returns:
[FingerPoint] : A list of *size* finger points (position relative to the starting position, velocity and direction).
Raises:
RuntimeError : When there is a problem during the acquisition (more than
one hand or finger or the finger disappear, ...)
"""
# Wait until there is exactly one hand with exactly one finger in view.
while len(ctrl.frame().hands) != 1 or len(ctrl.frame().hands[0].fingers) != 1:
time.sleep(0.001)
frame = ctrl.frame()
id_hand = frame.hands[0].id
id_finger = frame.hands[0].fingers[0].id
finger = frame.hands[0].fingers[0]
o_pos = finger.tip_position.x, finger.tip_position.y, finger.tip_position.z
move = []
for _ in xrange(size):
frame = ctrl.frame()
if len(frame.hands) != 1:
raise RuntimeError('Data acquisition stop by hands.')
if frame.hands[0].id != id_hand:
raise RuntimeError('Data acquisition stop by hand\'s id.')
if len(frame.hands[0].fingers) != 1:
raise RuntimeError('Data acquisition stop by fingers.')
if frame.hands[0].fingers[0].id != id_finger:
raise RuntimeError('Data acquisition stop by finger\'s id.')
finger = frame.hands[0].fingers[0]
f_pos = finger.tip_position.x, \
finger.tip_position.y, \
finger.tip_position.z
f_vel = finger.tip_velocity.x, \
finger.tip_velocity.y, \
finger.tip_velocity.z
f_dir = finger.direction.x, \
finger.direction.y, \
finger.direction.z
f_dpos = map(lambda x: x[0] - x[1], zip(f_pos, o_pos))
point = FingerPoint(f_dpos, f_vel, f_dir)
move.append(point)
time.sleep(time_sleep)
return move
def wait_move(ctrl, static_threashold=20):
"""
Wait and block until there is only one hand and one finger and the finger
moves by *static_threashold* distance.
Arguments:
ctrl -- Leap.Controller : Controller to use
static_threashold -- Real : Distance the finger has to move (default 20)
"""
origin = None
while True:
time.sleep(0.01)
frame = ctrl.frame()
if len(frame.hands) != 1:
origin = None
continue
if len(frame.hands[0].fingers) != 1:
origin = None
continue
if origin is None:
origin = frame.hands[0].fingers[0].tip_position
continue
p = frame.hands[0].fingers[0].tip_position
if abs(p.x - origin.x) + abs(p.y - origin.y) >= static_threashold:
break
if __name__ == '__main__':
ctrl = Leap.Controller()
while not ctrl.is_connected:
time.sleep(0.001)
cont = True
moves = []
size = 100
print 'Move name ?',
name = raw_input()
while cont:
print 'Waiting the beginning of the move...'
wait_move(ctrl)
print 'Recording the move...'
try:
move = FingerMove(name, acquire_move(ctrl, size))
except RuntimeError as e:
print e.message
else:
print 'Keep it ? (y/n)',
if raw_input() == 'y':
moves.append(move)
print 'Continue ? (y/n)',
cont = raw_input() == 'y'
if moves:
_f_name = name.lower() + '.mv'
print 'Save moves into ? [%s]' % _f_name,
f_name = raw_input()
if not f_name:
f_name = _f_name
print 'Saving into %s...' % f_name,
with open(f_name, 'w+') as f:
for m in moves:
m.save(f)
print 'OK'
| mit | 7,622,142,636,752,140,000 | 27.874552 | 83 | 0.546177 | false |
FichteFoll/CSScheme | my_sublime_lib/__init__.py | 1 | 8904 | from sublime_plugin import WindowCommand, TextCommand
import sublime
__all__ = ['ST2', 'ST3', 'WindowAndTextCommand', 'Settings', 'FileSettings']
ST2 = sublime.version().startswith('2')
ST3 = not ST2
class WindowAndTextCommand(WindowCommand, TextCommand):
"""A class to derive from when using a Window- and a TextCommand in one
class (e.g. when you make a build system that should/could also be called
from the command palette with the view in its focus).
Defines both self.view and self.window.
Be careful: self.window may be ``None`` when called as a
TextCommand because ``view.window()`` is not really safe and will
fail in quite a few cases. Since the compromise of using
``sublime.active_window()`` in that case is not wanted by every
command, I refrained from doing so. Thus, it is the command's duty to
check whether the window is valid.
Since this class derives from both Window- and a TextCommand it is also
callable with the known methods, like
``window.run_command("window_and_text")``.
I defined a dummy ``run`` method to prevent parameters from raising an
exception so this command call does exactly nothing.
Still a better method than having the parent class (the command you
will define) derive from three classes with the limitation that this
class must be the first one (the *Command classes do not use super()
for multi-inheritance support; neither do I but apparently I have
reasons).
"""
def __init__(self, param):
# no super() call! this would get the references confused
if isinstance(param, sublime.Window):
self.window = param
self._window_command = True # probably called from build system
self.typ = WindowCommand
elif isinstance(param, sublime.View):
self.view = param
self._window_command = False
self.typ = TextCommand
else:
raise TypeError("Something really bad happened and you are responsible")
self._update_members()
def _update_members(self):
if self._window_command:
self.view = self.window.active_view()
else:
self.window = self.view.window()
def run_(self, *args):
"""Wraps the other run_ method implementations from sublime_plugin.
Required to update the self.view and self.window variables.
"""
self._update_members()
# Obviously `super` does not work here
self.typ.run_(self, *args)
class Settings(object):
"""Helper class for accessing sublime.Settings' values.
Settings(settings, none_erases=False)
* settings (sublime.Settings)
Should be self-explanatory.
* none_erases (bool, optional)
Iff ``True`` a setting's key will be erased when setting it to
``None``. This only has a meaning when the key you erase is
defined in a parent Settings collection which would be
retrieved in that case.
Defines the default methods for sublime.Settings:
get(key, default=None)
set(key, value)
erase(key)
has(key)
add_on_change(key, on_change)
clear_on_change(key, on_change)
http://www.sublimetext.com/docs/2/api_reference.html#sublime.Settings
If ``none_erases == True`` you can erase a key when setting it to
``None``. This only has a meaning when the key you erase is defined in
a parent Settings collection which would be retrieved in that case.
The following methods can be used to retrieve a setting's value:
value = self.get('key', default)
value = self['key']
value = self.key_without_spaces
The following methods can be used to set a setting's value:
self.set('key', value)
self['key'] = value
self.key_without_spaces = value
The following methods can be used to erase a key in the setting:
self.erase('key')
self.set('key', None) or similar # iff ``none_erases == True``
del self.key_without_spaces
! Important:
Don't use the attribute method with one of these keys; ``dir(Settings)``:
['__class__', '__delattr__', '__dict__', '__doc__', '__format__',
'__getattr__', '__getattribute__', '__getitem__', '__hash__',
'__init__', '__module__', '__new__', '__reduce__', '__reduce_ex__',
'__repr__', '__setattr__', '__setitem__', '__sizeof__', '__str__',
'__subclasshook__', '__weakref__',
'_none_erases', '_s', '_settable_attributes',
'add_on_change', 'clear_on_change',
'erase', 'get', 'has', 'set']
Getting will return the respective function/value, setting will do
nothing. Setting of _leading_underline_values from above will result in
unpredictable behavior. Please don't do this! And re-consider even when
you know what you're doing.
"""
_none_erases = False
_s = None
_settable_attributes = ('_s', '_none_erases') # allow only setting of these attributes
def __init__(self, settings, none_erases=False):
if not isinstance(settings, sublime.Settings):
raise ValueError("Not an instance of sublime.Settings")
self._s = settings
self._none_erases = none_erases
def get(self, key, default=None):
"""Returns the named setting, or ``default`` if it's not defined.
"""
return self._s.get(key, default)
def set(self, key, value):
"""Sets the named setting. Only primitive types, lists, and
dictionaries are accepted.
Erases the key iff ``value is None``.
"""
if value is None and self._none_erases:
self.erase(key)
else:
self._s.set(key, value)
def erase(self, key):
"""Removes the named setting. Does not remove it from any parent Settings.
"""
self._s.erase(key)
def has(self, key):
"""Returns true iff the named option exists in this set of Settings or
one of its parents.
"""
return self._s.has(key)
def add_on_change(self, key, on_change):
"""Register a callback to be run whenever the setting with this key in
this object is changed.
"""
self._s.add_on_change(key, on_change)
def clear_on_change(self, key, on_change):
"""Remove all callbacks registered with the given key.
"""
self._s.clear_on_change(key, on_change)
def __getitem__(self, key):
"""self[key]"""
return self.get(key)
def __setitem__(self, key, value):
"""self[key] = value"""
self.set(key, value)
def __getattr__(self, key):
"""self.key_without_spaces"""
return self.get(key)
def __setattr__(self, key, value):
"""self.key_without_spaces = value"""
if key in self._settable_attributes:
object.__setattr__(self, key, value)
else:
self.set(key, value)
def __delattr__(self, key):
"""del self.key_without_spaces"""
if key in dir(self):
return
else:
self.erase(key)
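def _settings_usage_example(view):
    """Hypothetical usage sketch (not part of the original module): basic
    item- and attribute-style access on a view's settings. The ``view``
    argument is assumed to be a valid ``sublime.View``."""
    settings = Settings(view.settings(), none_erases=True)
    settings.tab_size = 4          # same as settings.set('tab_size', 4)
    size = settings['tab_size']    # same as settings.get('tab_size')
    del settings.tab_size          # same as settings.erase('tab_size')
    return size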
class FileSettings(Settings):
"""Helper class for accessing sublime.Settings' values.
Derived from sublime_lib.Settings. Please also read the documentation
there.
FileSettings(name, none_erases=False)
* name (str)
The file name that's passed to sublime.load_settings().
* none_erases (bool, optional)
Iff ``True`` a setting's key will be erased when setting it to
``None``. This only has a meaning when the key you erase is
defined in a parent Settings collection which would be
retrieved in that case.
Defines the following extra methods:
save()
Flushes in-memory changes to the disk
See: sublime.save_settings(name)
Adds these attributes to the list of unreferable attribute names for
settings:
['_name', 'save']
Please compare with the list from sublime_lib.Settings or
``dir(FileSettings)``.
"""
_name = ""
_settable_attributes = ('_s', '_name', '_none_erases') # allow only setting of these attributes
def __init__(self, name, none_erases=False):
settings = sublime.load_settings(name)
if not settings:
raise ValueError('Could not create settings from name "%s"' % name)
self._name = name
super(FileSettings, self).__init__(settings, none_erases)
def save(self):
sublime.save_settings(self._name)
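def _file_settings_usage_example():
    """Hypothetical usage sketch (not part of the original module): editing
    and persisting a named settings file. The file name is an assumption."""
    prefs = FileSettings('Preferences.sublime-settings')
    prefs.font_size = 12           # same as prefs.set('font_size', 12)
    prefs.save()                   # flush the in-memory change to disk
    return prefs.font_size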
| mit | -872,804,802,803,249,200 | 34.903226 | 100 | 0.596698 | false |
craigderington/striker_api | tutorial/tutorial/settings.py | 1 | 3252 | """
Django settings for tutorial project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'or_aadirpo#*ktg_m!t5b8elubza*pahe&0*sr1cz&o@1)j@rm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'snippets',
]
REST_FRAMEWORK = {
'PAGE_SIZE': 10
}
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tutorial.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'tutorial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| gpl-3.0 | 3,665,217,307,449,652,700 | 24.606299 | 91 | 0.686962 | false |
HongxuChen/dotfiles | _ipython/extensions/pep8_magic.py | 1 | 1477 | # magic function that checks a cell for pep8 compliance
# %%pep8
# a=1
# should give an error about missing spaces
import sys
import tempfile
import io
import logging
from IPython.core.magic import register_cell_magic
def load_ipython_extension(ipython):
# The `ipython` argument is the currently active `InteractiveShell`
# instance, which can be used in any way. This allows you to register
# new magics or aliases, for example.
pass
def unload_ipython_extension(ipython):
# If you want your extension to be unloadable, put that logic here.
pass
@register_cell_magic
def pep8(line, cell):
"""pep8 cell magic"""
import pep8
logger = logging.getLogger('pep8')
logger.setLevel(logging.INFO)
# output is written to stdout
# remember and replace
old_stdout = sys.stdout
# temporary replace
sys.stdout = io.BytesIO()
# store code in a file, todo unicode
with tempfile.NamedTemporaryFile() as f:
# save to file
f.write(cell + "\n")
# make sure it's written
f.flush()
# now we can check the file by name.
# we might be able to use 'stdin', have to check implementation
pep8style = pep8.StyleGuide()
# check the filename
pep8style.check_files(paths=[f.name])
# split lines
stdout = sys.stdout.getvalue().splitlines()
for line in stdout:
logger.info(line)
# restore
sys.stdout = old_stdout
return
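# Hypothetical usage sketch (not part of the original extension): in an
# IPython/Jupyter session, load the extension and check a cell with the
# cell magic, e.g.
#
#   %load_ext pep8_magic
#
#   %%pep8
#   a=1
#
# which should log a pep8 warning about the missing whitespace around the
# operator in ``a=1``.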
| mit | 9,202,553,753,179,302,000 | 26.351852 | 73 | 0.66283 | false |
sagivba/MachineLearningUtils | Tests/UnitTests/_BasePlot_UnitTest.py | 1 | 1666 | import unittest
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn import datasets
from MachineLearningUtils.UsefulPlots import _BasePlot
from MachineLearningUtils.DatasetTools import DatasetsTools
class _BasePlotTestCase(unittest.TestCase):
def setUp(self):
self.iris_df = DatasetsTools(datasets.load_iris).data_as_df(target_column_name="target")
self.boston_df = DatasetsTools(datasets.load_boston).data_as_df()
self.fig = plt.figure()
self.iris___BasePlot = _BasePlot(df=self.iris_df)
def test_set_fig(self):
created_fig = _BasePlot._set_fig()
self.assertTrue(isinstance(created_fig, plt.Figure))
created_fig = _BasePlot._set_fig(self.fig)
self.assertTrue(isinstance(created_fig, plt.Figure))
def test_set_ax(self):
created_ax = _BasePlot._set_ax()
ax = plt.figure().gca()
self.assertTrue(isinstance(created_ax, plt.Axes))
created_fig = _BasePlot._set_fig(plt.figure())
self.assertTrue(isinstance(created_fig, plt.Figure))
created_ax = _BasePlot._set_ax(created_fig)
self.assertTrue(isinstance(created_ax, plt.Figure))
def test_set_df(self):
bp = self.iris___BasePlot
iris_df = bp._set_df()
expected_columns_lst = list(self.iris_df)
actual_columns_lst = list(iris_df)
self.assertEquals(actual_columns_lst, expected_columns_lst)
boston_df = bp._set_df(self.boston_df)
expected_columns_lst = list(self.boston_df)
actual_columns_lst = list(boston_df)
self.assertEquals(actual_columns_lst, expected_columns_lst)
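if __name__ == '__main__':
    # Not part of the original file: allow running this test module directly.
    unittest.main()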
| mit | -8,488,111,132,523,062,000 | 36.022222 | 96 | 0.672269 | false |
gersolar/noaadem | setup.py | 1 | 4971 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
from setuptools.command.install import install
import os
import subprocess
from urllib2 import urlopen
import zipfile
import sys
import time
def parse_requirements(filename):
return list(filter(lambda line: (line.strip())[0] != '#',
[line.strip() for line in open(filename).readlines()]))
def calculate_version():
# Fetch version from git tags, and write to version.py.
# Also, when git is not available (PyPi package), use stored version.py.
version_py = os.path.join(os.path.dirname(__file__), 'version.py')
try:
git_version = subprocess.check_output(["git", "describe"]).rstrip()
except Exception:
with open(version_py, 'r') as filehandler:
git_version = (open(version_py).read()
.strip().split('=')[-1].replace('"', ''))
version_msg = ('# Do not edit this file, pipeline versioning is '
'governed by git tags')
with open(version_py, 'w') as filehandler:
filehandler.write(version_msg + os.linesep + "__version__=" +
git_version)
return git_version
REQUIREMENTS = parse_requirements('requirements.txt')
VERSION_GIT = calculate_version()
def get_long_description():
readme_file = 'README.md'
if not os.path.isfile(readme_file):
return ''
# Try to transform the README from Markdown to reStructuredText.
try:
import pandoc
pandoc.core.PANDOC_PATH = 'pandoc'
doc = pandoc.Document()
doc.markdown = open(readme_file).read()
description = doc.rst
except Exception:
description = open(readme_file).read()
return description
source = "http://www.ngdc.noaa.gov/mgg/topo/DATATILES/elev/all10g.zip"
localpath = os.path.expanduser('~/noaadem')
destiny = '%s/%s' % (localpath, source.split("/")[-1])
class DataInstaller(object):
def create_path(self):
if not os.path.exists(localpath):
os.makedirs(localpath)
def download(self, source, destiny):
self.create_path()
print "Downloading %s..." % source
system = os.uname()[-1]
self.chunk_size = 2048
if system == 'armv6l':
self.chunk_size = 16
f = urlopen(source)
CHUNK = self.chunk_size * 1024
with open(destiny, "wb") as local_file:
while True:
chunk = f.read(CHUNK)
if not chunk: break
local_file.write(chunk)
sys.stdout.write('.')
sys.stdout.flush()
print "The file was downloaded to %s." % destiny
def unzip(self, source, path_destiny):
print "Decompressing the file %s..." % source
with zipfile.ZipFile(source) as zf:
for member in zf.infolist():
words = member.filename.split('/')
path = path_destiny
for word in words[:-1]:
word = os.path.splitdrive(word)[1]
word = os.path.split(word)[1]
if word in (os.curdir, os.pardir, ''): continue
path = os.path.join(path, word)
zf.extract(member, path)
print "The file %s was decompressed." % destiny
def obtain(self, source, destiny):
if not os.path.exists(destiny):
self.download(source, destiny)
self.unzip(destiny, localpath+"/")
def deploy(self):
self.obtain(source, destiny)
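# Hypothetical usage sketch (not part of the original script): the installer
# can also be driven by hand, downloading and unpacking the NOAA DEM tiles
# into ~/noaadem without going through setuptools:
#   DataInstaller().deploy()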
class install_wrapper(install):
def finalize_options(self):
data_installer = DataInstaller()
data_installer.deploy()
sys.stdout.write("The noaadem package create the forlder %s to "
"save the DEM maps." % localpath)
return install.finalize_options(self)
setup(
name='noaadem',
version=VERSION_GIT,
author=u'Eloy Adonis Colell',
author_email='[email protected]',
packages=['noaadem'],
url='https://github.com/gersolar/noaadem',
license='MIT',
description='A python library that simplifies access to the NOAA digital '
'elevation map.',
long_description=get_long_description(),
zip_safe=True,
install_requires=REQUIREMENTS,
classifiers=[
"Intended Audience :: Science/Research",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Topic :: Scientific/Engineering :: Atmospheric Science",
"Topic :: Scientific/Engineering :: Bio-Informatics",
"Topic :: Scientific/Engineering :: GIS",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Physics",
],
cmdclass={
'install': install_wrapper
},
)
| mit | 245,445,928,350,427,260 | 32.362416 | 79 | 0.602696 | false |
salv-orlando/MyRepo | nova/db/sqlalchemy/migrate_repo/versions/054_add_bw_usage_data_cache.py | 1 | 2133 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 MORITA Kazutaka.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, Table, MetaData
from sqlalchemy import Integer, BigInteger, DateTime, Boolean, String
from nova import log as logging
meta = MetaData()
bw_cache = Table('bw_usage_cache', meta,
Column('created_at', DateTime(timezone=False)),
Column('updated_at', DateTime(timezone=False)),
Column('deleted_at', DateTime(timezone=False)),
Column('deleted', Boolean(create_constraint=True, name=None)),
Column('id', Integer(), primary_key=True, nullable=False),
Column('instance_id', Integer(), nullable=False),
Column('network_label',
String(length=255, convert_unicode=False, assert_unicode=None,
unicode_error=None, _warn_on_bytestring=False)),
Column('start_period', DateTime(timezone=False), nullable=False),
Column('last_refreshed', DateTime(timezone=False)),
Column('bw_in', BigInteger()),
Column('bw_out', BigInteger()))
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine;
# bind migrate_engine to your metadata
meta.bind = migrate_engine
try:
bw_cache.create()
except Exception:
logging.info(repr(bw_cache))
logging.exception('Exception while creating table')
meta.drop_all(tables=[bw_cache])
raise
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
bw_cache.drop()
| apache-2.0 | -5,622,963,119,208,717,000 | 36.421053 | 78 | 0.680263 | false |
thorwhalen/ut | scrap/xgoogle/googlesets.py | 1 | 2504 | #!/usr/bin/python
#
# Peteris Krumins ([email protected])
# http://www.catonmat.net -- good coders code, great reuse
#
# http://www.catonmat.net/blog/python-library-for-google-sets/
#
# Code is licensed under MIT license.
#
import re
import urllib.request, urllib.parse, urllib.error
import random
from html.entities import name2codepoint
from .BeautifulSoup import BeautifulSoup
from .browser import Browser, BrowserError
class GSError(Exception):
""" Google Sets Error """
pass
class GSParseError(Exception):
"""
Parse error in Google Sets results.
The self.msg attribute contains an explanation of why parsing failed;
the self.tag attribute contains a BeautifulSoup object with the most relevant tag that failed to parse.
Thrown only in debug mode
"""
def __init__(self, msg, tag):
self.msg = msg
self.tag = tag
def __str__(self):
return self.msg
def html(self):
return self.tag.prettify()
LARGE_SET = 1
SMALL_SET = 2
class GoogleSets(object):
URL_LARGE = "http://labs.google.com/sets?hl=en&q1=%s&q2=%s&q3=%s&q4=%s&q5=%s&btn=Large+Set"
URL_SMALL = "http://labs.google.com/sets?hl=en&q1=%s&q2=%s&q3=%s&q4=%s&q5=%s&btn=Small+Set+(15+items+or+fewer)"
def __init__(self, items, random_agent=False, debug=False):
self.items = items
self.debug = debug
self.browser = Browser(debug=debug)
if random_agent:
self.browser.set_random_user_agent()
def get_results(self, set_type=SMALL_SET):
page = self._get_results_page(set_type)
results = self._extract_results(page)
return results
def _maybe_raise(self, cls, *arg):
if self.debug:
raise cls(*arg)
def _get_results_page(self, set_type):
if set_type == LARGE_SET:
url = GoogleSets.URL_LARGE
else:
url = GoogleSets.URL_SMALL
safe_items = [urllib.parse.quote_plus(i) for i in self.items]
blank_items = 5 - len(safe_items)
if blank_items > 0:
safe_items += ['']*blank_items
safe_url = url % tuple(safe_items)
try:
page = self.browser.get_page(safe_url)
except BrowserError as e:
raise GSError("Failed getting %s: %s" % (e.url, e.error))
return BeautifulSoup(page)
def _extract_results(self, soup):
a_links = soup.findAll('a', href=re.compile('/search'))
ret_res = [a.string for a in a_links]
return ret_res
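# Hypothetical usage sketch (not part of the original module): expanding a
# small seed list into a related set. Illustrative only -- the Google Sets
# service this module talks to has since been shut down.
#   gs = GoogleSets(['python', 'ruby', 'perl'], random_agent=True)
#   for item in gs.get_results(LARGE_SET):
#       print item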
| mit | -2,047,066,397,241,556,500 | 27.134831 | 115 | 0.621805 | false |
avoyages/etfa2015 | mtf_python3_v1/mtf/utils_b.py | 1 | 1538 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Modbus TestKit: Implementation of Modbus protocol in python
(C)2009 - Luc Jean - [email protected]
(C)2009 - Apidev - http://www.apidev.fr
This is distributed under GNU LGPL license, see license.txt
"""
import threading
import logging
import socket
import select
import modbus_tk.utils as utils
def threadsafe_fun(fcn):
"""decorator making sure that the decorated function is thread safe"""
lock = threading.Lock()
def new(*args, **kwargs):
"""lock and call the decorated function"""
lock.acquire()
try:
ret = fcn(*args, **kwargs)
except Exception as excpt:
raise excpt
finally:
lock.release()
return ret
return new
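def _example_threadsafe_counter(counter):
    """Hypothetical usage sketch (not part of the original module): the
    decorator above serializes concurrent calls, so the increment below
    cannot interleave between threads. *counter* is assumed to be a dict."""
    @threadsafe_fun
    def increment():
        counter['value'] = counter.get('value', 0) + 1
    increment()
    return counter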
def flush_socket_b(socks, lim=0):
"""remove the data present on the socket"""
input_socks = [socks]
cnt = 0
while 1:
i_socks, o_socks, e_socks = select.select(input_socks, input_socks, input_socks, 0.0)
if len(i_socks)==0:
break
for sock in i_socks:
sock.recv(1024)
if lim>0:
cnt += 1
if cnt>=lim:
#avoid infinite loop due to loss of connection
raise Exception("flush_socket: maximum number of iterations reached")
def get_log_buffer_b(prefix, buff):
"""Format binary data into a string for debug purpose"""
log = prefix
for i in buff:
log += str(ord(i)) + "-"
return log[:-1] | gpl-3.0 | 1,101,563,569,886,617,100 | 26.981818 | 93 | 0.583875 | false |
mdaif/olympia | apps/users/views.py | 1 | 26488 | import functools
from datetime import datetime
from functools import partial
from django import http
from django.conf import settings
from django.contrib import auth
from django.contrib.auth.forms import PasswordResetForm
from django.contrib.auth.tokens import default_token_generator
from django.db import IntegrityError
from django.db.models import Q, Sum
from django.shortcuts import (get_list_or_404, get_object_or_404, redirect,
render)
from django.template import Context, loader
from django.utils.http import is_safe_url, urlsafe_base64_decode
from django.views.decorators.cache import never_cache
import commonware.log
import waffle
from mobility.decorators import mobile_template
from session_csrf import anonymous_csrf, anonymous_csrf_exempt
from tower import ugettext as _
import amo
import users.notifications as notifications
from abuse.models import send_abuse_report
from access import acl
from access.middleware import ACLMiddleware
from addons.decorators import addon_view_factory
from addons.models import Addon, AddonUser, Category
from amo import messages
from amo.decorators import (json_view, login_required, permission_required,
post_required, write)
from amo.forms import AbuseForm
from amo.urlresolvers import get_url_prefix, reverse
from amo.utils import escape_all, log_cef, send_mail
from bandwagon.models import Collection
from browse.views import PersonasFilter
from translations.query import order_by_translation
from users.models import UserNotification
import tasks
from . import forms
from .models import UserProfile
from .signals import logged_out
from .utils import EmailResetCode, UnsubscribeCode
log = commonware.log.getLogger('z.users')
addon_view = addon_view_factory(qs=Addon.objects.valid)
THEMES_LIMIT = 20
def user_view(f):
@functools.wraps(f)
def wrapper(request, user_id, *args, **kw):
"""Provides a user object given a user ID or username."""
if user_id.isdigit():
key = 'id'
else:
key = 'username'
# If the username is `me` then show the current user's profile.
if (user_id == 'me' and request.user and
request.user.username):
user_id = request.user.username
user = get_object_or_404(UserProfile, **{key: user_id})
return f(request, user, *args, **kw)
return wrapper
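# Hypothetical usage sketch (not part of the original module): user_view lets a
# view accept a numeric id, a username, or the literal "me", and hands the
# resolved UserProfile to the wrapped view. The view below is made up for
# illustration and is not wired into any URLconf.
#
#   @user_view
#   def example_profile_ping(request, user):
#       return http.HttpResponse(user.username)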
@login_required(redirect=False)
@json_view
def ajax(request):
"""Query for a user matching a given email."""
if 'q' not in request.GET:
raise http.Http404()
data = {'status': 0, 'message': ''}
email = request.GET.get('q', '').strip()
if not email:
data.update(message=_('An email address is required.'))
return data
user = UserProfile.objects.filter(email=email)
msg = _('A user with that email address does not exist.')
if user:
data.update(status=1, id=user[0].id, name=user[0].name)
else:
data['message'] = msg
return escape_all(data)
@user_view
def confirm(request, user, token):
if not user.confirmationcode:
return redirect('users.login')
if user.confirmationcode != token:
log.info(u"Account confirmation failed for user (%s)", user)
messages.error(request, _('Invalid confirmation code!'))
return redirect('users.login')
user.confirmationcode = ''
user.save()
messages.success(request, _('Successfully verified!'))
log.info(u"Account confirmed for user (%s)", user)
return redirect('users.login')
@user_view
def confirm_resend(request, user):
if not user.confirmationcode:
return redirect('users.login')
# Potential for flood here if someone requests a confirmationcode and then
# re-requests confirmations. We may need to track requests in the future.
log.info(u"Account confirm re-requested for user (%s)", user)
user.email_confirmation_code()
msg = _(u'An email has been sent to your address {0} to confirm '
u'your account. Before you can log in, you have to activate '
u'your account by clicking on the link provided in this '
u'email.').format(user.email)
messages.info(request, _('Confirmation Email Sent'), msg)
return redirect('users.login')
@login_required
def delete(request):
amouser = request.user
if request.method == 'POST':
form = forms.UserDeleteForm(request.POST, request=request)
if form.is_valid():
messages.success(request, _('Profile Deleted'))
amouser.anonymize()
logout(request)
form = None
return http.HttpResponseRedirect(reverse('users.login'))
else:
form = forms.UserDeleteForm()
return render(request, 'users/delete.html',
{'form': form, 'amouser': amouser})
@login_required
def delete_photo(request):
u = request.user
if request.method == 'POST':
u.picture_type = ''
u.save()
log.debug(u"User (%s) deleted photo" % u)
tasks.delete_photo.delay(u.picture_path)
messages.success(request, _('Photo Deleted'))
return http.HttpResponseRedirect(reverse('users.edit') +
'#user-profile')
return render(request, 'users/delete_photo.html', dict(user=u))
@write
@login_required
def edit(request):
# Don't use request.user since it has too much caching.
amouser = UserProfile.objects.get(pk=request.user.id)
if request.method == 'POST':
# ModelForm alters the instance you pass in. We need to keep a copy
# around in case we need to use it below (to email the user)
original_email = amouser.email
form = forms.UserEditForm(request.POST, request.FILES, request=request,
instance=amouser)
if form.is_valid():
messages.success(request, _('Profile Updated'))
if amouser.email != original_email:
l = {'user': amouser,
'mail1': original_email,
'mail2': amouser.email}
log.info(u"User (%(user)s) has requested email change from "
u"(%(mail1)s) to (%(mail2)s)" % l)
messages.info(
request, _('Email Confirmation Sent'),
_(u'An email has been sent to {0} to confirm your new '
u'email address. For the change to take effect, you '
u'need to click on the link provided in this email. '
u'Until then, you can keep logging in with your '
u'current email address.').format(amouser.email))
token, hash_ = EmailResetCode.create(amouser.id, amouser.email)
url = '%s%s' % (settings.SITE_URL,
reverse('users.emailchange',
args=[amouser.id, token, hash_]))
t = loader.get_template('users/email/emailchange.ltxt')
c = {'domain': settings.DOMAIN, 'url': url}
send_mail(
_('Please confirm your email address '
'change at %s' % settings.DOMAIN),
t.render(Context(c)), None, [amouser.email],
use_blacklist=False, real_email=True)
# Reset the original email back. We aren't changing their
# address until they confirm the new one
amouser.email = original_email
form.save()
return redirect('users.edit')
else:
messages.error(
request,
_('Errors Found'),
_('There were errors in the changes you made. Please correct '
'them and resubmit.'))
else:
form = forms.UserEditForm(instance=amouser, request=request)
return render(request, 'users/edit.html',
{'form': form, 'amouser': amouser})
def tshirt_eligible(user):
MIN_PERSONA_ADU = 10000
return (
user.t_shirt_requested or
AddonUser.objects.filter(
user=user,
role__in=(amo.AUTHOR_ROLE_OWNER, amo.AUTHOR_ROLE_DEV),
addon__type=amo.ADDON_EXTENSION,
addon__disabled_by_user=False)
.filter(
Q(addon__is_listed=True,
addon___current_version__files__status__in=amo.REVIEWED_STATUSES,
addon__status__in=amo.REVIEWED_STATUSES) |
Q(addon__is_listed=False,
addon__versions__files__is_signed=True))
.exists() or
Addon.objects.filter(
authors=user,
type=amo.ADDON_PERSONA,
status=amo.STATUS_PUBLIC,
disabled_by_user=False)
.aggregate(users=Sum('average_daily_users'))['users'] >=
MIN_PERSONA_ADU)
@write
@login_required
def t_shirt(request):
if not waffle.switch_is_active('t-shirt-orders'):
raise http.Http404()
user = request.user
eligible = tshirt_eligible(user)
if request.method == 'POST':
if not eligible:
messages.error(request,
_("We're sorry, but you are not eligible to "
"request a t-shirt at this time."))
return redirect('users.t-shirt')
if not user.t_shirt_requested:
user.update(t_shirt_requested=datetime.now())
return render(request, 'users/t-shirt.html',
{'eligible': eligible, 'user': user})
@write
@login_required
@permission_required('Users', 'Edit')
@user_view
def admin_edit(request, user):
if request.method == 'POST':
form = forms.AdminUserEditForm(request.POST, request.FILES,
request=request, instance=user)
if form.is_valid():
form.save()
messages.success(request, _('Profile Updated'))
return http.HttpResponseRedirect(reverse('zadmin.index'))
else:
form = forms.AdminUserEditForm(instance=user, request=request)
return render(request, 'users/edit.html', {'form': form, 'amouser': user})
@user_view
def emailchange(request, user, token, hash):
try:
_uid, newemail = EmailResetCode.parse(token, hash)
except ValueError:
return http.HttpResponse(status=400)
if _uid != user.id:
# I'm calling this a warning because invalid hashes up to this point
# could be any number of things, but this is a targeted attack from
# one user account to another
log.warning((u"[Tampering] Valid email reset code for UID (%s) "
u"attempted to change email address for user (%s)") %
(_uid, user))
return http.HttpResponse(status=400)
if UserProfile.objects.filter(email=newemail).exists():
log.warning((u"[Tampering] User (%s) tries to change his email to "
u"an existing account with the same email address (%s)") %
(user, newemail))
return http.HttpResponse(status=400)
user.email = newemail
user.save()
l = {'user': user, 'newemail': newemail}
log.info(u"User (%(user)s) confirmed new email address (%(newemail)s)" % l)
messages.success(
request, _('Your email address was changed successfully'),
_(u'From now on, please use {0} to log in.').format(newemail))
return http.HttpResponseRedirect(reverse('users.edit'))
def _clean_next_url(request):
gets = request.GET.copy()
url = gets.get('to', settings.LOGIN_REDIRECT_URL)
if not is_safe_url(url, host=request.get_host()):
log.info(u'Unsafe redirect to %s' % url)
url = settings.LOGIN_REDIRECT_URL
domain = gets.get('domain', None)
if domain in settings.VALID_LOGIN_REDIRECTS.keys():
url = settings.VALID_LOGIN_REDIRECTS[domain] + url
gets['to'] = url
request.GET = gets
return request
@anonymous_csrf
@mobile_template('users/{mobile/}login_modal.html')
def login_modal(request, template=None):
return _login(request, template=template)
@anonymous_csrf
@mobile_template('users/{mobile/}login.html')
def login(request, template=None):
return _login(request, template=template)
def _login(request, template=None, data=None, dont_redirect=False):
data = data or {}
# In case we need it later. See below.
get_copy = request.GET.copy()
if 'to' in request.GET:
request = _clean_next_url(request)
if request.user.is_authenticated():
return http.HttpResponseRedirect(
request.GET.get('to', settings.LOGIN_REDIRECT_URL))
limited = getattr(request, 'limited', 'recaptcha_shown' in request.POST)
user = None
login_status = None
if 'username' in request.POST:
try:
# We are doing all this before we try and validate the form.
user = UserProfile.objects.get(email=request.POST['username'])
limited = ((user.failed_login_attempts >=
settings.LOGIN_RATELIMIT_USER) or limited)
login_status = False
except UserProfile.DoesNotExist:
log_cef('Authentication Failure', 5, request,
username=request.POST['username'],
signature='AUTHFAIL',
msg='The username was invalid')
pass
partial_form = partial(forms.AuthenticationForm, use_recaptcha=limited)
r = auth.views.login(request, template_name=template,
redirect_field_name='to',
authentication_form=partial_form,
extra_context=data)
if isinstance(r, http.HttpResponseRedirect):
# Django's auth.views.login has security checks to prevent someone from
# redirecting to another domain. Since we want to allow this in
# certain cases, we have to make a new response object here to replace
# the above.
if 'domain' in request.GET:
request.GET = get_copy
request = _clean_next_url(request)
r = http.HttpResponseRedirect(request.GET['to'])
# Successful log in according to django. Now we do our checks. I do
# the checks here instead of the form's clean() because I want to use
# the messages framework and it's not available in the request there.
if user.deleted:
logout(request)
log.warning(u'Attempt to log in with deleted account (%s)' % user)
messages.error(request, _('Wrong email address or password!'))
data.update({'form': partial_form()})
user.log_login_attempt(False)
log_cef('Authentication Failure', 5, request,
username=request.user,
signature='AUTHFAIL',
msg='Account is deactivated')
return render(request, template, data)
if user.confirmationcode:
logout(request)
log.info(u'Attempt to log in with unconfirmed account (%s)' % user)
msg1 = _(u'A link to activate your user account was sent by email '
u'to your address {0}. You have to click it before you '
u'can log in.').format(user.email)
url = "%s%s" % (settings.SITE_URL,
reverse('users.confirm.resend', args=[user.id]))
msg2 = _('If you did not receive the confirmation email, make '
'sure your email service did not mark it as "junk '
'mail" or "spam". If you need to, you can have us '
'<a href="%s">resend the confirmation message</a> '
'to your email address mentioned above.') % url
messages.error(request, _('Activation Email Sent'), msg1)
messages.info(request, _('Having Trouble?'), msg2,
title_safe=True, message_safe=True)
data.update({'form': partial_form()})
user.log_login_attempt(False)
return render(request, template, data)
rememberme = request.POST.get('rememberme', None)
if rememberme:
request.session.set_expiry(settings.SESSION_COOKIE_AGE)
log.debug(
u'User (%s) logged in successfully with "remember me" set' %
user)
login_status = True
if dont_redirect:
# We're recalling the middleware to re-initialize user
ACLMiddleware().process_request(request)
r = render(request, template, data)
if login_status is not None:
user.log_login_attempt(login_status)
log_cef('Authentication Failure', 5, request,
username=request.POST['username'],
signature='AUTHFAIL',
msg='The password was incorrect')
return r
def logout(request):
user = request.user
if not user.is_anonymous():
log.debug(u"User (%s) logged out" % user)
auth.logout(request)
if 'to' in request.GET:
request = _clean_next_url(request)
next = request.GET.get('to')
if not next:
next = settings.LOGOUT_REDIRECT_URL
prefixer = get_url_prefix()
if prefixer:
next = prefixer.fix(next)
response = http.HttpResponseRedirect(next)
# Fire logged out signal.
logged_out.send(None, request=request, response=response)
return response
@user_view
def profile(request, user):
# Get user's own and favorite collections, if they allowed that.
own_coll = fav_coll = []
if user.display_collections:
own_coll = (Collection.objects.listed().filter(author=user)
.order_by('-created'))[:10]
if user.display_collections_fav:
fav_coll = (Collection.objects.listed()
.filter(following__user=user)
.order_by('-following__created'))[:10]
edit_any_user = acl.action_allowed(request, 'Users', 'Edit')
own_profile = (request.user.is_authenticated() and
request.user.id == user.id)
addons = []
personas = []
limited_personas = False
if user.is_developer:
addons = user.addons.reviewed().filter(
addonuser__user=user, addonuser__listed=True)
personas = addons.filter(type=amo.ADDON_PERSONA).order_by(
'-persona__popularity')
if personas.count() > THEMES_LIMIT:
limited_personas = True
personas = personas[:THEMES_LIMIT]
addons = addons.exclude(type=amo.ADDON_PERSONA).order_by(
'-weekly_downloads')
addons = amo.utils.paginate(request, addons, 5)
reviews = amo.utils.paginate(request, user.reviews.all())
data = {'profile': user, 'own_coll': own_coll, 'reviews': reviews,
'fav_coll': fav_coll, 'edit_any_user': edit_any_user,
'addons': addons, 'own_profile': own_profile,
'personas': personas, 'limited_personas': limited_personas,
'THEMES_LIMIT': THEMES_LIMIT}
if not own_profile:
data['abuse_form'] = AbuseForm(request=request)
return render(request, 'users/profile.html', data)
@user_view
def themes(request, user, category=None):
cats = Category.objects.filter(type=amo.ADDON_PERSONA)
ctx = {
'profile': user,
'categories': order_by_translation(cats, 'name'),
'search_cat': 'themes'
}
if user.is_artist:
base = user.addons.reviewed().filter(
type=amo.ADDON_PERSONA,
addonuser__user=user, addonuser__listed=True)
if category:
qs = cats.filter(slug=category)
ctx['category'] = cat = get_list_or_404(qs)[0]
base = base.filter(categories__id=cat.id)
else:
base = Addon.objects.none()
filter_ = PersonasFilter(request, base, key='sort',
default='popular')
addons = amo.utils.paginate(request, filter_.qs, 30,
count=base.count())
ctx.update({
'addons': addons,
'filter': filter_,
'sorting': filter_.field,
'sort_opts': filter_.opts
})
return render(request, 'browse/personas/grid.html', ctx)
@anonymous_csrf
def register(request):
if request.user.is_authenticated():
messages.info(request, _('You are already logged in to an account.'))
form = None
elif request.method == 'POST':
form = forms.UserRegisterForm(request.POST)
mkt_user = UserProfile.objects.filter(email=form.data['email'],
password='')
if form.is_valid():
try:
u = form.save(commit=False)
u.set_password(form.cleaned_data['password'])
u.generate_confirmationcode()
u.lang = request.LANG
u.save()
log.info(u'Registered new account for user (%s)', u)
log_cef('New Account', 5, request, username=u.username,
signature='AUTHNOTICE',
msg='User created a new account')
u.email_confirmation_code()
msg = _('Congratulations! Your user account was '
'successfully created.')
messages.success(request, msg)
msg = _(u'An email has been sent to your address {0} to '
'confirm your account. Before you can log in, you '
'have to activate your account by clicking on the '
'link provided in this email.').format(u.email)
messages.info(request, _('Confirmation Email Sent'), msg)
except IntegrityError, e:
# I was unable to reproduce this, but I suspect it happens
# when they POST twice quickly and the slaves don't have the
# new info yet (total guess). Anyway, I'm assuming the
# first one worked properly, so this is still a success
# case to the end user so we just log it...
log.error('Failed to register new user (%s): %s' % (u, e))
return http.HttpResponseRedirect(reverse('users.login'))
elif mkt_user.exists():
f = PasswordResetForm()
f.users_cache = [mkt_user[0]]
f.save(use_https=request.is_secure(),
email_template_name='users/email/pwreset.ltxt',
request=request)
return render(request, 'users/newpw_sent.html', {})
else:
messages.error(request, _('There are errors in this form'),
_('Please correct them and resubmit.'))
else:
form = forms.UserRegisterForm()
reg_action = reverse('users.register')
return render(request, 'users/register.html',
{'form': form, 'register_action': reg_action})
@anonymous_csrf_exempt
@user_view
def report_abuse(request, user):
form = AbuseForm(request.POST or None, request=request)
if request.method == 'POST' and form.is_valid():
send_abuse_report(request, user, form.cleaned_data['text'])
messages.success(request, _('User reported.'))
else:
return render(request, 'users/report_abuse_full.html',
{'profile': user, 'abuse_form': form})
return redirect(user.get_url_path())
@post_required
@user_view
def remove_locale(request, user):
"""Remove a locale from the user's translations."""
POST = request.POST
if 'locale' in POST and POST['locale'] != settings.LANGUAGE_CODE:
user.remove_locale(POST['locale'])
return http.HttpResponse()
return http.HttpResponseBadRequest()
@never_cache
@anonymous_csrf
def password_reset_confirm(request, uidb64=None, token=None):
"""
Pulled from django contrib so that we can add user into the form
so then we can show relevant messages about the user.
"""
assert uidb64 is not None and token is not None
user = None
try:
uid_int = urlsafe_base64_decode(uidb64)
user = UserProfile.objects.get(id=uid_int)
except (ValueError, UserProfile.DoesNotExist, TypeError):
pass
if user is not None and default_token_generator.check_token(user, token):
validlink = True
if request.method == 'POST':
form = forms.SetPasswordForm(user, request.POST)
if form.is_valid():
form.save()
log_cef('Password Changed', 5, request,
username=user.username,
signature='PASSWORDCHANGED',
msg='User changed password')
return redirect(reverse('django.contrib.auth.'
'views.password_reset_complete'))
else:
form = forms.SetPasswordForm(user)
else:
validlink = False
form = None
return render(request, 'users/pwreset_confirm.html',
{'form': form, 'validlink': validlink})
@never_cache
def unsubscribe(request, hash=None, token=None, perm_setting=None):
"""
Pulled from django contrib so that we can add user into the form
so then we can show relevant messages about the user.
"""
assert hash is not None and token is not None
user = None
try:
email = UnsubscribeCode.parse(token, hash)
user = UserProfile.objects.get(email=email)
except (ValueError, UserProfile.DoesNotExist):
pass
perm_settings = []
if user is not None:
unsubscribed = True
if not perm_setting:
# TODO: make this work. nothing currently links to it, though.
perm_settings = [l for l in notifications.NOTIFICATIONS
if not l.mandatory]
else:
perm_setting = notifications.NOTIFICATIONS_BY_SHORT[perm_setting]
UserNotification.update_or_create(
update={'enabled': False},
user=user, notification_id=perm_setting.id)
perm_settings = [perm_setting]
else:
unsubscribed = False
email = ''
return render(request, 'users/unsubscribe.html',
{'unsubscribed': unsubscribed, 'email': email,
'perm_settings': perm_settings})
| bsd-3-clause | -4,560,108,672,903,128,600 | 35.284932 | 79 | 0.595817 | false |
diracdeltas/lets-encrypt-preview | letsencrypt/client/tests/revoker_test.py | 1 | 15041 | """Test letsencrypt.client.revoker."""
import csv
import os
import pkg_resources
import shutil
import tempfile
import unittest
import mock
from letsencrypt.client import errors
from letsencrypt.client import le_util
from letsencrypt.client.plugins.apache import configurator
from letsencrypt.client.display import util as display_util
class RevokerBase(unittest.TestCase): # pylint: disable=too-few-public-methods
"""Base Class for Revoker Tests."""
def setUp(self):
self.paths, self.certs, self.key_path = create_revoker_certs()
self.backup_dir = tempfile.mkdtemp("cert_backup")
self.mock_config = mock.MagicMock(cert_key_backup=self.backup_dir)
self.list_path = os.path.join(self.backup_dir, "LIST")
def _store_certs(self):
# pylint: disable=protected-access
from letsencrypt.client.revoker import Revoker
Revoker.store_cert_key(self.paths[0], self.key_path, self.mock_config)
Revoker.store_cert_key(self.paths[1], self.key_path, self.mock_config)
# Set metadata
for i in xrange(2):
self.certs[i].add_meta(
i, self.paths[i], self.key_path,
Revoker._get_backup(self.backup_dir, i, self.paths[i]),
Revoker._get_backup(self.backup_dir, i, self.key_path))
def _get_rows(self):
with open(self.list_path, "rb") as csvfile:
return [row for row in csv.reader(csvfile)]
def _write_rows(self, rows):
with open(self.list_path, "wb") as csvfile:
csvwriter = csv.writer(csvfile)
for row in rows:
csvwriter.writerow(row)
class RevokerTest(RevokerBase):
def setUp(self):
from letsencrypt.client.revoker import Revoker
super(RevokerTest, self).setUp()
with open(self.key_path) as key_file:
self.key = le_util.Key(self.key_path, key_file.read())
self._store_certs()
self.revoker = Revoker(
mock.MagicMock(spec=configurator.ApacheConfigurator),
self.mock_config)
def tearDown(self):
shutil.rmtree(self.backup_dir)
@mock.patch("letsencrypt.client.revoker.network."
"Network.send_and_receive_expected")
@mock.patch("letsencrypt.client.revoker.revocation")
def test_revoke_by_key_all(self, mock_display, mock_net):
mock_display().confirm_revocation.return_value = True
self.revoker.revoke_from_key(self.key)
self.assertEqual(self._get_rows(), [])
# Check to make sure backups were eliminated
for i in xrange(2):
self.assertFalse(self._backups_exist(self.certs[i].get_row()))
self.assertEqual(mock_net.call_count, 2)
@mock.patch("letsencrypt.client.revoker.Crypto.PublicKey.RSA.importKey")
def test_revoke_by_invalid_keys(self, mock_import):
mock_import.side_effect = ValueError
self.assertRaises(errors.LetsEncryptRevokerError,
self.revoker.revoke_from_key,
self.key)
mock_import.side_effect = [mock.Mock(), IndexError]
self.assertRaises(errors.LetsEncryptRevokerError,
self.revoker.revoke_from_key,
self.key)
@mock.patch("letsencrypt.client.revoker.network."
"Network.send_and_receive_expected")
@mock.patch("letsencrypt.client.revoker.revocation")
def test_revoke_by_wrong_key(self, mock_display, mock_net):
mock_display().confirm_revocation.return_value = True
key_path = pkg_resources.resource_filename(
"letsencrypt.acme.jose", os.path.join(
"testdata", "rsa256_key.pem"))
wrong_key = le_util.Key(key_path, open(key_path).read())
self.revoker.revoke_from_key(wrong_key)
# Nothing was removed
self.assertEqual(len(self._get_rows()), 2)
# No revocation went through
self.assertEqual(mock_net.call_count, 0)
@mock.patch("letsencrypt.client.revoker.network."
"Network.send_and_receive_expected")
@mock.patch("letsencrypt.client.revoker.revocation")
def test_revoke_by_cert(self, mock_display, mock_net):
mock_display().confirm_revocation.return_value = True
self.revoker.revoke_from_cert(self.paths[1])
row0 = self.certs[0].get_row()
row1 = self.certs[1].get_row()
self.assertEqual(self._get_rows(), [row0])
self.assertTrue(self._backups_exist(row0))
self.assertFalse(self._backups_exist(row1))
self.assertEqual(mock_net.call_count, 1)
@mock.patch("letsencrypt.client.revoker.network."
"Network.send_and_receive_expected")
@mock.patch("letsencrypt.client.revoker.revocation")
def test_revoke_by_cert_not_found(self, mock_display, mock_net):
mock_display().confirm_revocation.return_value = True
self.revoker.revoke_from_cert(self.paths[0])
self.revoker.revoke_from_cert(self.paths[0])
row0 = self.certs[0].get_row()
row1 = self.certs[1].get_row()
# Same check as last time... just reversed.
self.assertEqual(self._get_rows(), [row1])
self.assertTrue(self._backups_exist(row1))
self.assertFalse(self._backups_exist(row0))
self.assertEqual(mock_net.call_count, 1)
@mock.patch("letsencrypt.client.revoker.network."
"Network.send_and_receive_expected")
@mock.patch("letsencrypt.client.revoker.revocation")
def test_revoke_by_menu(self, mock_display, mock_net):
mock_display().confirm_revocation.return_value = True
mock_display.display_certs.side_effect = [
(display_util.HELP, 0),
(display_util.OK, 0),
(display_util.CANCEL, -1),
]
self.revoker.revoke_from_menu()
row0 = self.certs[0].get_row()
row1 = self.certs[1].get_row()
self.assertEqual(self._get_rows(), [row1])
self.assertFalse(self._backups_exist(row0))
self.assertTrue(self._backups_exist(row1))
self.assertEqual(mock_net.call_count, 1)
self.assertEqual(mock_display.more_info_cert.call_count, 1)
@mock.patch("letsencrypt.client.revoker.logging")
@mock.patch("letsencrypt.client.revoker.network."
"Network.send_and_receive_expected")
@mock.patch("letsencrypt.client.revoker.revocation")
def test_revoke_by_menu_delete_all(self, mock_display, mock_net, mock_log):
mock_display().confirm_revocation.return_value = True
mock_display.display_certs.return_value = (display_util.OK, 0)
self.revoker.revoke_from_menu()
self.assertEqual(self._get_rows(), [])
# Everything should be deleted...
for i in xrange(2):
self.assertFalse(self._backups_exist(self.certs[i].get_row()))
self.assertEqual(mock_net.call_count, 2)
# Info is called when there aren't any certs left...
self.assertTrue(mock_log.info.called)
@mock.patch("letsencrypt.client.revoker.revocation")
@mock.patch("letsencrypt.client.revoker.Revoker._acme_revoke")
@mock.patch("letsencrypt.client.revoker.logging")
def test_safe_revoke_acme_fail(self, mock_log, mock_revoke, mock_display):
# pylint: disable=protected-access
mock_revoke.side_effect = errors.LetsEncryptClientError
mock_display().confirm_revocation.return_value = True
self.revoker._safe_revoke(self.certs)
self.assertTrue(mock_log.error.called)
@mock.patch("letsencrypt.client.revoker.Crypto.PublicKey.RSA.importKey")
def test_acme_revoke_failure(self, mock_crypto):
# pylint: disable=protected-access
mock_crypto.side_effect = ValueError
self.assertRaises(errors.LetsEncryptClientError,
self.revoker._acme_revoke,
self.certs[0])
def test_remove_certs_from_list_bad_certs(self):
# pylint: disable=protected-access
from letsencrypt.client.revoker import Cert
new_cert = Cert(self.paths[0])
# This isn't stored in the db
new_cert.idx = 10
new_cert.backup_path = self.paths[0]
new_cert.backup_key_path = self.key_path
new_cert.orig = Cert.PathStatus("false path", "not here")
new_cert.orig_key = Cert.PathStatus("false path", "not here")
self.assertRaises(errors.LetsEncryptRevokerError,
self.revoker._remove_certs_from_list,
[new_cert])
def _backups_exist(self, row):
# pylint: disable=protected-access
cert_path, key_path = self.revoker._row_to_backup(row)
return os.path.isfile(cert_path) and os.path.isfile(key_path)
class RevokerInstallerTest(RevokerBase):
def setUp(self):
super(RevokerInstallerTest, self).setUp()
self.installs = [
["installation/path0a", "installation/path0b"],
["installation/path1"],
]
self.certs_keys = [
(self.paths[0], self.key_path, self.installs[0][0]),
(self.paths[0], self.key_path, self.installs[0][1]),
(self.paths[1], self.key_path, self.installs[1][0]),
]
self._store_certs()
def _get_revoker(self, installer):
from letsencrypt.client.revoker import Revoker
return Revoker(installer, self.mock_config)
def test_no_installer_get_installed_locations(self):
# pylint: disable=protected-access
revoker = self._get_revoker(None)
self.assertEqual(revoker._get_installed_locations(), {})
def test_get_installed_locations(self):
# pylint: disable=protected-access
mock_installer = mock.MagicMock()
mock_installer.get_all_certs_keys.return_value = self.certs_keys
revoker = self._get_revoker(mock_installer)
sha_vh = revoker._get_installed_locations()
self.assertEqual(len(sha_vh), 2)
for i, cert in enumerate(self.certs):
self.assertTrue(cert.get_fingerprint() in sha_vh)
self.assertEqual(
sha_vh[cert.get_fingerprint()], self.installs[i])
@mock.patch("letsencrypt.client.revoker.M2Crypto.X509.load_cert")
def test_get_installed_load_failure(self, mock_m2):
mock_installer = mock.MagicMock()
mock_installer.get_all_certs_keys.return_value = self.certs_keys
mock_m2.side_effect = IOError
revoker = self._get_revoker(mock_installer)
# pylint: disable=protected-access
self.assertEqual(revoker._get_installed_locations(), {})
class RevokerClassMethodsTest(RevokerBase):
def setUp(self):
super(RevokerClassMethodsTest, self).setUp()
self.mock_config = mock.MagicMock(cert_key_backup=self.backup_dir)
def tearDown(self):
shutil.rmtree(self.backup_dir)
def _call(self, cert_path, key_path):
from letsencrypt.client.revoker import Revoker
Revoker.store_cert_key(cert_path, key_path, self.mock_config)
def test_store_two(self):
from letsencrypt.client.revoker import Revoker
self._call(self.paths[0], self.key_path)
self._call(self.paths[1], self.key_path)
self.assertTrue(os.path.isfile(self.list_path))
rows = self._get_rows()
for i, row in enumerate(rows):
# pylint: disable=protected-access
self.assertTrue(os.path.isfile(
Revoker._get_backup(self.backup_dir, i, self.paths[i])))
self.assertTrue(os.path.isfile(
Revoker._get_backup(self.backup_dir, i, self.key_path)))
self.assertEqual([str(i), self.paths[i], self.key_path], row)
self.assertEqual(len(rows), 2)
def test_store_one_mixed(self):
from letsencrypt.client.revoker import Revoker
self._write_rows(
[["5", "blank", "blank"], ["18", "dc", "dc"], ["21", "b", "b"]])
self._call(self.paths[0], self.key_path)
self.assertEqual(
self._get_rows()[3], ["22", self.paths[0], self.key_path])
# pylint: disable=protected-access
self.assertTrue(os.path.isfile(
Revoker._get_backup(self.backup_dir, 22, self.paths[0])))
self.assertTrue(os.path.isfile(
Revoker._get_backup(self.backup_dir, 22, self.key_path)))
class CertTest(unittest.TestCase):
def setUp(self):
self.paths, self.certs, self.key_path = create_revoker_certs()
def test_failed_load(self):
from letsencrypt.client.revoker import Cert
self.assertRaises(errors.LetsEncryptRevokerError, Cert, self.key_path)
def test_no_row(self):
self.assertEqual(self.certs[0].get_row(), None)
def test_meta_moved_files(self):
from letsencrypt.client.revoker import Cert
fake_path = "/not/a/real/path/r72d3t6"
self.certs[0].add_meta(
0, fake_path, fake_path, self.paths[0], self.key_path)
self.assertEqual(self.certs[0].orig.status, Cert.DELETED_MSG)
self.assertEqual(self.certs[0].orig_key.status, Cert.DELETED_MSG)
def test_meta_changed_files(self):
from letsencrypt.client.revoker import Cert
self.certs[0].add_meta(
0, self.paths[1], self.paths[1], self.paths[0], self.key_path)
self.assertEqual(self.certs[0].orig.status, Cert.CHANGED_MSG)
self.assertEqual(self.certs[0].orig_key.status, Cert.CHANGED_MSG)
def test_meta_no_status(self):
self.certs[0].add_meta(
0, self.paths[0], self.key_path, self.paths[0], self.key_path)
self.assertEqual(self.certs[0].orig.status, "")
self.assertEqual(self.certs[0].orig_key.status, "")
def test_print_meta(self):
"""Just make sure there aren't any major errors."""
self.certs[0].add_meta(
0, self.paths[0], self.key_path, self.paths[0], self.key_path)
# Changed path and deleted file
self.certs[1].add_meta(
1, self.paths[0], "/not/a/path", self.paths[1], self.key_path)
self.assertTrue(self.certs[0].pretty_print())
self.assertTrue(self.certs[1].pretty_print())
def test_print_no_meta(self):
self.assertTrue(self.certs[0].pretty_print())
self.assertTrue(self.certs[1].pretty_print())
def create_revoker_certs():
"""Create a few revoker.Cert objects."""
from letsencrypt.client.revoker import Cert
base_package = "letsencrypt.client.tests"
cert0_path = pkg_resources.resource_filename(
base_package, os.path.join("testdata", "cert.pem"))
cert1_path = pkg_resources.resource_filename(
base_package, os.path.join("testdata", "cert-san.pem"))
cert0 = Cert(cert0_path)
cert1 = Cert(cert1_path)
key_path = pkg_resources.resource_filename(
base_package, os.path.join("testdata", "rsa512_key.pem"))
return [cert0_path, cert1_path], [cert0, cert1], key_path
if __name__ == "__main__":
unittest.main() # pragma: no cover
| apache-2.0 | -3,991,802,676,454,155,000 | 35.865196 | 79 | 0.633734 | false |
saghul/gyn | gyn/msvs_emulation.py | 1 | 47261 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module helps emulate Visual Studio 2008 behavior on top of other
build systems, primarily ninja.
"""
import os
import re
import subprocess
import sys
from gyn.common import OrderedSet
import gyn.MSVSUtil
import gyn.MSVSVersion
import collections
windows_quoter_regex = re.compile(r'(\\*)"')
def QuoteForRspFile(arg):
"""Quote a command line argument so that it appears as one argument when
processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
Windows programs)."""
# See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
# threads. This is actually the quoting rules for CommandLineToArgvW, not
# for the shell, because the shell doesn't do anything in Windows. This
# works more or less because most programs (including the compiler, etc.)
# use that function to handle command line arguments.
# For a literal quote, CommandLineToArgvW requires 2n+1 backslashes
# preceding it, and results in n backslashes + the quote. So we substitute
# in 2* what we match, +1 more, plus the quote.
arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
# %'s also need to be doubled otherwise they're interpreted as batch
# positional arguments. Also make sure to escape the % so that they're
# passed literally through escaping so they can be singled to just the
# original %. Otherwise, trying to pass the literal representation that
# looks like an environment variable to the shell (e.g. %PATH%) would fail.
arg = arg.replace('%', '%%')
# These commands are used in rsp files, so no escaping for the shell (via ^)
# is necessary.
# Finally, wrap the whole thing in quotes so that the above quote rule
# applies and whitespace isn't a word break.
return '"' + arg + '"'
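# Illustrative examples of the quoting rules above (comment added for
# clarity; not part of the original module):
#   QuoteForRspFile('say "hi"')  produces  "say \"hi\""  in the rsp file,
#   QuoteForRspFile('100%')      produces  "100%%",
# i.e. embedded quotes get CommandLineToArgvW-style backslash escaping and
# percent signs are doubled so they survive cmd.exe batch expansion.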
def EncodeRspFileList(args):
"""Process a list of arguments using QuoteCmdExeArgument."""
# Note that the first argument is assumed to be the command. Don't add
# quotes around it because then built-ins like 'echo', etc. won't work.
# Take care to normpath only the path in the case of 'call ../x.bat' because
# otherwise the whole thing is incorrectly interpreted as a path and not
# normalized correctly.
if not args: return ''
if args[0].startswith('call '):
call, program = args[0].split(' ', 1)
program = call + ' ' + os.path.normpath(program)
else:
program = os.path.normpath(args[0])
return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
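# Usage sketch (illustrative, not in the original source); the file and flag
# names below are made up:
#   EncodeRspFileList(['cl.exe', '/Fo:out dir\\x.obj', 'x.cc'])
# returns roughly:  cl.exe "/Fo:out dir\x.obj" "x.cc"
# The program name is normpath'd but deliberately left unquoted so shell
# built-ins like 'echo' keep working.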
def _GenericRetrieve(root, default, path):
"""Given a list of dictionary keys |path| and a tree of dicts |root|, find
value at path, or return |default| if any of the path doesn't exist."""
if not root:
return default
if not path:
return root
return _GenericRetrieve(root.get(path[0]), default, path[1:])
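# Small usage sketch (added comment, not in the original source):
#   _GenericRetrieve({'a': {'b': 1}}, None, ['a', 'b'])  ->  1
#   _GenericRetrieve({'a': {'b': 1}}, None, ['a', 'c'])  ->  None (default)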
def _AddPrefix(element, prefix):
"""Add |prefix| to |element| or each subelement if element is iterable."""
if element is None:
return element
# Note, not Iterable because we don't want to handle strings like that.
if isinstance(element, list) or isinstance(element, tuple):
return [prefix + e for e in element]
else:
return prefix + element
def _DoRemapping(element, map):
"""If |element| then remap it through |map|. If |element| is iterable then
each item will be remapped. Any elements not found will be removed."""
if map is not None and element is not None:
if not isinstance(map, collections.Callable):
map = map.get # Assume it's a dict, otherwise a callable to do the remap.
if isinstance(element, list) or isinstance(element, tuple):
      element = [_f for _f in [map(elem) for elem in element] if _f]
    else:
      element = map(element)
return element
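# Usage sketch (added comment, not in the original source): with a dict-backed
# map, unmapped values drop out of lists and scalars pass through the lookup:
#   _DoRemapping(['1', 'x', '2'], {'1': 'd', '2': '2'})  ->  ['d', '2']
#   _DoRemapping('true', {'true': '/GF'})                ->  '/GF'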
def _AppendOrReturn(append, element):
"""If |append| is None, simply return |element|. If |append| is not None,
then add |element| to it, adding each item in |element| if it's a list or
tuple."""
if append is not None and element is not None:
if isinstance(element, list) or isinstance(element, tuple):
append.extend(element)
else:
append.append(element)
else:
return element
def _FindDirectXInstallation():
"""Try to find an installation location for the DirectX SDK. Check for the
standard environment variable, and if that doesn't exist, try to find
via the registry. May return None if not found in either location."""
# Return previously calculated value, if there is one
if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
return _FindDirectXInstallation.dxsdk_dir
dxsdk_dir = os.environ.get('DXSDK_DIR')
if not dxsdk_dir:
# Setup params to pass to and attempt to launch reg.exe.
cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
for line in p.communicate()[0].splitlines():
if 'InstallPath' in line:
dxsdk_dir = line.split(' ')[3] + "\\"
# Cache return value
_FindDirectXInstallation.dxsdk_dir = dxsdk_dir
return dxsdk_dir
def GetGlobalVSMacroEnv(vs_version):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents. Returns all variables that are independent of the target."""
env = {}
# '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when
# Visual Studio is actually installed.
if vs_version.Path():
env['$(VSInstallDir)'] = vs_version.Path()
env['$(VCInstallDir)'] = os.path.join(vs_version.Path(), 'VC') + '\\'
# Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
# set. This happens when the SDK is sync'd via src-internal, rather than
# by typical end-user installation of the SDK. If it's not set, we don't
# want to leave the unexpanded variable in the path, so simply strip it.
dxsdk_dir = _FindDirectXInstallation()
env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
# Try to find an installation location for the Windows DDK by checking
# the WDK_DIR environment variable, may be None.
env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
return env
def ExtractSharedMSVSSystemIncludes(configs, generator_flags):
"""Finds msvs_system_include_dirs that are common to all targets, removes
them from all targets, and returns an OrderedSet containing them."""
all_system_includes = OrderedSet(
configs[0].get('msvs_system_include_dirs', []))
for config in configs[1:]:
system_includes = config.get('msvs_system_include_dirs', [])
all_system_includes = all_system_includes & OrderedSet(system_includes)
if not all_system_includes:
return None
# Expand macros in all_system_includes.
env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags))
expanded_system_includes = OrderedSet([ExpandMacros(include, env)
for include in all_system_includes])
if any(['$' in include for include in expanded_system_includes]):
# Some path relies on target-specific variables, bail.
return None
# Remove system includes shared by all targets from the targets.
for config in configs:
includes = config.get('msvs_system_include_dirs', [])
if includes: # Don't insert a msvs_system_include_dirs key if not needed.
# This must check the unexpanded includes list:
new_includes = [i for i in includes if i not in all_system_includes]
config['msvs_system_include_dirs'] = new_includes
return expanded_system_includes
class MsvsSettings(object):
"""A class that understands the gyp 'msvs_...' values (especially the
  msvs_settings field). They largely correspond to the VS2008 IDE DOM. This
class helps map those settings to command line options."""
def __init__(self, spec, generator_flags):
self.spec = spec
self.vs_version = GetVSVersion(generator_flags)
supported_fields = [
('msvs_configuration_attributes', dict),
('msvs_settings', dict),
('msvs_system_include_dirs', list),
('msvs_disabled_warnings', list),
('msvs_precompiled_header', str),
('msvs_precompiled_source', str),
('msvs_configuration_platform', str),
('msvs_target_platform', str),
]
configs = spec['configurations']
for field, default in supported_fields:
setattr(self, field, {})
for configname, config in configs.items():
getattr(self, field)[configname] = config.get(field, default())
self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])
unsupported_fields = [
'msvs_prebuild',
'msvs_postbuild',
]
unsupported = []
for field in unsupported_fields:
for config in list(configs.values()):
if field in config:
unsupported += ["%s not supported (target %s)." %
(field, spec['target_name'])]
if unsupported:
raise Exception('\n'.join(unsupported))
def GetExtension(self):
"""Returns the extension for the target, with no leading dot.
Uses 'product_extension' if specified, otherwise uses MSVS defaults based on
the target type.
"""
ext = self.spec.get('product_extension', None)
if ext:
return ext
return gyn.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')
def GetVSMacroEnv(self, base_to_build=None, config=None):
"""Get a dict of variables mapping internal VS macro names to their gyp
equivalents."""
target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
target_name = self.spec.get('product_prefix', '') + \
self.spec.get('product_name', self.spec['target_name'])
target_dir = base_to_build + '\\' if base_to_build else ''
target_ext = '.' + self.GetExtension()
target_file_name = target_name + target_ext
replacements = {
'$(InputName)': '${root}',
'$(InputPath)': '${source}',
'$(IntDir)': '$!INTERMEDIATE_DIR',
'$(OutDir)\\': target_dir,
'$(PlatformName)': target_platform,
'$(ProjectDir)\\': '',
'$(ProjectName)': self.spec['target_name'],
'$(TargetDir)\\': target_dir,
'$(TargetExt)': target_ext,
'$(TargetFileName)': target_file_name,
'$(TargetName)': target_name,
'$(TargetPath)': os.path.join(target_dir, target_file_name),
}
replacements.update(GetGlobalVSMacroEnv(self.vs_version))
return replacements
def ConvertVSMacros(self, s, base_to_build=None, config=None):
"""Convert from VS macro names to something equivalent."""
env = self.GetVSMacroEnv(base_to_build, config=config)
return ExpandMacros(s, env)
def AdjustLibraries(self, libraries):
"""Strip -l from library if it's specified with that."""
libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
return [lib + '.lib' if not lib.endswith('.lib') else lib for lib in libs]
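  # Illustrative example (comment added for clarity; not in the original
  # source): AdjustLibraries(['-lfoo', 'bar.lib', 'baz']) returns
  # ['foo.lib', 'bar.lib', 'baz.lib'].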
def _GetAndMunge(self, field, path, default, prefix, append, map):
"""Retrieve a value from |field| at |path| or return |default|. If
|append| is specified, and the item is found, it will be appended to that
object instead of returned. If |map| is specified, results will be
remapped through |map| before being returned or appended."""
result = _GenericRetrieve(field, default, path)
result = _DoRemapping(result, map)
result = _AddPrefix(result, prefix)
return _AppendOrReturn(append, result)
class _GetWrapper(object):
def __init__(self, parent, field, base_path, append=None):
self.parent = parent
self.field = field
self.base_path = [base_path]
self.append = append
def __call__(self, name, map=None, prefix='', default=None):
return self.parent._GetAndMunge(self.field, self.base_path + [name],
default=default, prefix=prefix, append=self.append, map=map)
def GetArch(self, config):
"""Get architecture based on msvs_configuration_platform and
msvs_target_platform. Returns either 'x86' or 'x64'."""
configuration_platform = self.msvs_configuration_platform.get(config, '')
platform = self.msvs_target_platform.get(config, '')
if not platform: # If no specific override, use the configuration's.
platform = configuration_platform
# Map from platform to architecture.
return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
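  # Note (added for clarity): msvs_target_platform, when set, takes precedence
  # over msvs_configuration_platform; 'x64' maps to 'x64' and anything else,
  # including 'Win32' or an unset platform, falls back to 'x86'.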
def _TargetConfig(self, config):
"""Returns the target-specific configuration."""
    # There are two levels of architecture/platform specification in VS. The
# first level is globally for the configuration (this is what we consider
# "the" config at the gyp level, which will be something like 'Debug' or
# 'Release_x64'), and a second target-specific configuration, which is an
# override for the global one. |config| is remapped here to take into
# account the local target-specific overrides to the global configuration.
arch = self.GetArch(config)
if arch == 'x64' and not config.endswith('_x64'):
config += '_x64'
if arch == 'x86' and config.endswith('_x64'):
config = config.rsplit('_', 1)[0]
return config
def _Setting(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_settings."""
return self._GetAndMunge(
self.msvs_settings[config], path, default, prefix, append, map)
def _ConfigAttrib(self, path, config,
default=None, prefix='', append=None, map=None):
"""_GetAndMunge for msvs_configuration_attributes."""
return self._GetAndMunge(
self.msvs_configuration_attributes[config],
path, default, prefix, append, map)
def AdjustIncludeDirs(self, include_dirs, config):
"""Updates include_dirs to expand VS specific paths, and adds the system
include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
"""Updates midl_include_dirs to expand VS specific paths, and adds the
system include dirs used for platform SDK and similar."""
config = self._TargetConfig(config)
includes = midl_include_dirs + self.msvs_system_include_dirs[config]
includes.extend(self._Setting(
('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
return [self.ConvertVSMacros(p, config=config) for p in includes]
def GetComputedDefines(self, config):
"""Returns the set of defines that are injected to the defines list based
on other VS settings."""
config = self._TargetConfig(config)
defines = []
if self._ConfigAttrib(['CharacterSet'], config) == '1':
defines.extend(('_UNICODE', 'UNICODE'))
if self._ConfigAttrib(['CharacterSet'], config) == '2':
defines.append('_MBCS')
defines.extend(self._Setting(
('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
return defines
def GetCompilerPdbName(self, config, expand_special):
"""Get the pdb file name that should be used for compiler invocations, or
None if there's no explicit name specified."""
config = self._TargetConfig(config)
pdbname = self._Setting(
('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
if pdbname:
pdbname = expand_special(self.ConvertVSMacros(pdbname))
return pdbname
  def GetMapFileName(self, config, expand_special):
    """Gets the explicitly overridden map file name for a target or returns
    None if it's not set."""
config = self._TargetConfig(config)
map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
if map_file:
map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
return map_file
def GetOutputName(self, config, expand_special):
"""Gets the explicitly overridden output name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
type = self.spec['type']
root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
# TODO(scottmg): Handle OutputDirectory without OutputFile.
output_file = self._Setting((root, 'OutputFile'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetPDBName(self, config, expand_special, default):
"""Gets the explicitly overridden pdb name for a target or returns
default if it's not overridden, or if no pdb will be generated."""
config = self._TargetConfig(config)
output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
generate_debug_info = self._Setting(
('VCLinkerTool', 'GenerateDebugInformation'), config)
if generate_debug_info == 'true':
if output_file:
return expand_special(self.ConvertVSMacros(output_file, config=config))
else:
return default
else:
return None
def GetNoImportLibrary(self, config):
"""If NoImportLibrary: true, ninja will not expect the output to include
an import library."""
config = self._TargetConfig(config)
noimplib = self._Setting(('NoImportLibrary',), config)
return noimplib == 'true'
def GetAsmflags(self, config):
"""Returns the flags that need to be added to ml invocations."""
config = self._TargetConfig(config)
asmflags = []
safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config)
if safeseh == 'true':
asmflags.append('/safeseh')
return asmflags
def GetCflags(self, config):
"""Returns the flags that need to be added to .c and .cc compilations."""
config = self._TargetConfig(config)
cflags = []
cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
cl = self._GetWrapper(self, self.msvs_settings[config],
'VCCLCompilerTool', append=cflags)
cl('Optimization',
map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
cl('InlineFunctionExpansion', prefix='/Ob')
cl('DisableSpecificWarnings', prefix='/wd')
cl('StringPooling', map={'true': '/GF'})
cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
cl('FloatingPointModel',
map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:',
default='0')
cl('WholeProgramOptimization', map={'true': '/GL'})
cl('WarningLevel', prefix='/W')
cl('WarnAsError', map={'true': '/WX'})
cl('CallingConvention',
map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G')
cl('DebugInformationFormat',
map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
cl('MinimalRebuild', map={'true': '/Gm'})
cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
cl('RuntimeLibrary',
map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
cl('DefaultCharIsUnsigned', map={'true': '/J'})
cl('TreatWChar_tAsBuiltInType',
map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
cl('EnablePREfast', map={'true': '/analyze'})
cl('AdditionalOptions', prefix='')
cl('EnableEnhancedInstructionSet',
map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'},
prefix='/arch:')
cflags.extend(['/FI' + f for f in self._Setting(
('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
if self.vs_version.short_name in ('2013', '2013e', '2015'):
# New flag required in 2013 to maintain previous PDB behavior.
cflags.append('/FS')
# ninja handles parallelism by itself, don't have the compiler do it too.
cflags = [x for x in cflags if not x.startswith('/MP')]
return cflags
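  # Illustrative sketch (not part of the original file): a config whose
  # msvs_settings set VCCLCompilerTool Optimization to '0', RuntimeLibrary to
  # '1' and WarningLevel to '3' yields roughly ['/Od', '/MTd', '/W3'], plus
  # any /wd<n> entries coming from msvs_disabled_warnings.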
def _GetPchFlags(self, config, extension):
"""Get the flags to be added to the cflags for precompiled header support.
"""
config = self._TargetConfig(config)
# The PCH is only built once by a particular source file. Usage of PCH must
# only be for the same language (i.e. C vs. C++), so only include the pch
# flags when the language matches.
if self.msvs_precompiled_header[config]:
source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
if _LanguageMatchesForPch(source_ext, extension):
pch = os.path.split(self.msvs_precompiled_header[config])[1]
return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pch + '.pch']
return []
def GetCflagsC(self, config):
"""Returns the flags that need to be added to .c compilations."""
config = self._TargetConfig(config)
return self._GetPchFlags(config, '.c')
def GetCflagsCC(self, config):
"""Returns the flags that need to be added to .cc compilations."""
config = self._TargetConfig(config)
return ['/TP'] + self._GetPchFlags(config, '.cc')
def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
"""Get and normalize the list of paths in AdditionalLibraryDirectories
setting."""
config = self._TargetConfig(config)
libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
config, default=[])
libpaths = [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
for p in libpaths]
return ['/LIBPATH:"' + p + '"' for p in libpaths]
def GetLibFlags(self, config, gyp_to_build_path):
"""Returns the flags that need to be added to lib commands."""
config = self._TargetConfig(config)
libflags = []
lib = self._GetWrapper(self, self.msvs_settings[config],
'VCLibrarianTool', append=libflags)
libflags.extend(self._GetAdditionalLibraryDirectories(
'VCLibrarianTool', config, gyp_to_build_path))
lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
lib('AdditionalOptions')
return libflags
def GetDefFile(self, gyp_to_build_path):
"""Returns the .def file from sources, if any. Otherwise returns None."""
spec = self.spec
if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
return gyp_to_build_path(def_files[0])
elif len(def_files) > 1:
raise Exception("Multiple .def files")
return None
def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
""".def files get implicitly converted to a ModuleDefinitionFile for the
linker in the VS generator. Emulate that behaviour here."""
def_file = self.GetDefFile(gyp_to_build_path)
if def_file:
ldflags.append('/DEF:"%s"' % def_file)
def GetPGDName(self, config, expand_special):
"""Gets the explicitly overridden pgd name for a target or returns None
if it's not overridden."""
config = self._TargetConfig(config)
output_file = self._Setting(
('VCLinkerTool', 'ProfileGuidedDatabase'), config)
if output_file:
output_file = expand_special(self.ConvertVSMacros(
output_file, config=config))
return output_file
def GetLdflags(self, config, gyp_to_build_path, expand_special,
manifest_base_name, output_name, is_executable, build_dir):
"""Returns the flags that need to be added to link commands, and the
manifest files."""
config = self._TargetConfig(config)
ldflags = []
ld = self._GetWrapper(self, self.msvs_settings[config],
'VCLinkerTool', append=ldflags)
self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
ld('GenerateDebugInformation', map={'true': '/DEBUG'})
ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
prefix='/MACHINE:')
ldflags.extend(self._GetAdditionalLibraryDirectories(
'VCLinkerTool', config, gyp_to_build_path))
ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
ld('TreatLinkerWarningAsErrors', prefix='/WX',
map={'true': '', 'false': ':NO'})
out = self.GetOutputName(config, expand_special)
if out:
ldflags.append('/OUT:' + out)
pdb = self.GetPDBName(config, expand_special, output_name + '.pdb')
if pdb:
ldflags.append('/PDB:' + pdb)
pgd = self.GetPGDName(config, expand_special)
if pgd:
ldflags.append('/PGD:' + pgd)
map_file = self.GetMapFileName(config, expand_special)
ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
else '/MAP'})
ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
ld('AdditionalOptions', prefix='')
minimum_required_version = self._Setting(
('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
if minimum_required_version:
minimum_required_version = ',' + minimum_required_version
ld('SubSystem',
map={'1': 'CONSOLE%s' % minimum_required_version,
'2': 'WINDOWS%s' % minimum_required_version},
prefix='/SUBSYSTEM:')
ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
ld('BaseAddress', prefix='/BASE:')
ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
ld('RandomizedBaseAddress',
map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
ld('DataExecutionPrevention',
map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
ld('ForceSymbolReferences', prefix='/INCLUDE:')
ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
ld('LinkTimeCodeGeneration',
map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
'4': ':PGUPDATE'},
prefix='/LTCG')
ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
ld('EntryPointSymbol', prefix='/ENTRY:')
ld('Profile', map={'true': '/PROFILE'})
ld('LargeAddressAware',
map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
# TODO(scottmg): This should sort of be somewhere else (not really a flag).
ld('AdditionalDependencies', prefix='')
if self.GetArch(config) == 'x86':
safeseh_default = 'true'
else:
safeseh_default = None
ld('ImageHasSafeExceptionHandlers',
map={'false': ':NO', 'true': ''}, prefix='/SAFESEH',
default=safeseh_default)
# If the base address is not specifically controlled, DYNAMICBASE should
# be on by default.
base_flags = [x for x in ldflags if 'DYNAMICBASE' in x or x == '/FIXED']
if not base_flags:
ldflags.append('/DYNAMICBASE')
# If the NXCOMPAT flag has not been specified, default to on. Despite the
# documentation that says this only defaults to on when the subsystem is
# Vista or greater (which applies to the linker), the IDE defaults it on
# unless it's explicitly off.
if not [x for x in ldflags if 'NXCOMPAT' in x]:
ldflags.append('/NXCOMPAT')
have_def_file = [x for x in ldflags if x.startswith('/DEF:')]
manifest_flags, intermediate_manifest, manifest_files = \
self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
is_executable and not have_def_file, build_dir)
ldflags.extend(manifest_flags)
return ldflags, intermediate_manifest, manifest_files
def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
allow_isolation, build_dir):
"""Returns a 3-tuple:
- the set of flags that need to be added to the link to generate
a default manifest
- the intermediate manifest that the linker will generate that should be
used to assert it doesn't add anything to the merged one.
- the list of all the manifest files to be merged by the manifest tool and
included into the link."""
generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
config,
default='true')
if generate_manifest != 'true':
# This means not only that the linker should not generate the intermediate
# manifest but also that the manifest tool should do nothing even when
# additional manifests are specified.
return ['/MANIFEST:NO'], [], []
output_name = name + '.intermediate.manifest'
flags = [
'/MANIFEST',
'/ManifestFile:' + output_name,
]
# Instead of using the MANIFESTUAC flags, we generate a .manifest to
# include into the list of manifests. This allows us to avoid the need to
# do two passes during linking. The /MANIFEST flag and /ManifestFile are
# still used, and the intermediate manifest is used to assert that the
# final manifest we get from merging all the additional manifest files
# (plus the one we generate here) isn't modified by merging the
# intermediate into it.
# Always NO, because we generate a manifest file that has what we want.
flags.append('/MANIFESTUAC:NO')
config = self._TargetConfig(config)
enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
default='true')
manifest_files = []
generated_manifest_outer = \
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
"</assembly>"
if enable_uac == 'true':
execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
config, default='0')
execution_level_map = {
'0': 'asInvoker',
'1': 'highestAvailable',
'2': 'requireAdministrator'
}
ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
default='false')
inner = '''
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level='%s' uiAccess='%s' />
</requestedPrivileges>
</security>
</trustInfo>''' % (execution_level_map[execution_level], ui_access)
else:
inner = ''
generated_manifest_contents = generated_manifest_outer % inner
generated_name = name + '.generated.manifest'
# Need to join with the build_dir here as we're writing it during
# generation time, but we return the un-joined version because the build
# will occur in that directory. We only write the file if the contents
# have changed so that simply regenerating the project files doesn't
# cause a relink.
build_dir_generated_name = os.path.join(build_dir, generated_name)
gyn.common.EnsureDirExists(build_dir_generated_name)
f = gyn.common.WriteOnDiff(build_dir_generated_name)
f.write(generated_manifest_contents)
f.close()
manifest_files = [generated_name]
if allow_isolation:
flags.append('/ALLOWISOLATION')
manifest_files += self._GetAdditionalManifestFiles(config,
gyp_to_build_path)
return flags, output_name, manifest_files
def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
"""Gets additional manifest files that are added to the default one
generated by the linker."""
files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
default=[])
if isinstance(files, str):
files = files.split(';')
return [os.path.normpath(
gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
for f in files]
def IsUseLibraryDependencyInputs(self, config):
"""Returns whether the target should be linked via Use Library Dependency
Inputs (using component .objs of a given .lib)."""
config = self._TargetConfig(config)
uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
return uldi == 'true'
def IsEmbedManifest(self, config):
"""Returns whether manifest should be linked into binary."""
config = self._TargetConfig(config)
embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
default='true')
return embed == 'true'
def IsLinkIncremental(self, config):
"""Returns whether the target should be linked incrementally."""
config = self._TargetConfig(config)
link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
return link_inc != '1'
def GetRcflags(self, config, gyp_to_ninja_path):
"""Returns the flags that need to be added to invocations of the resource
compiler."""
config = self._TargetConfig(config)
rcflags = []
rc = self._GetWrapper(self, self.msvs_settings[config],
'VCResourceCompilerTool', append=rcflags)
rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
rcflags.append('/I' + gyp_to_ninja_path('.'))
rc('PreprocessorDefinitions', prefix='/d')
# /l arg must be in hex without leading '0x'
rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
return rcflags
def BuildCygwinBashCommandLine(self, args, path_to_base):
"""Build a command line that runs args via cygwin bash. We assume that all
incoming paths are in Windows normpath'd form, so they need to be
converted to posix style for the part of the command line that's passed to
bash. We also have to do some Visual Studio macro emulation here because
various rules use magic VS names for things. Also note that rules that
contain ninja variables cannot be fixed here (for example ${source}), so
the outer generator needs to make sure that the paths that are written out
are in posix style, if the command line will be used here."""
cygwin_dir = os.path.normpath(
os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
cd = ('cd %s' % path_to_base).replace('\\', '/')
args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
args = ["'%s'" % a.replace("'", "'\\''") for a in args]
bash_cmd = ' '.join(args)
cmd = (
'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
'bash -c "%s ; %s"' % (cd, bash_cmd))
return cmd
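  # Example of the generated command (illustrative; the cygwin dir and the
  # wrapped arguments are made up):
  #   call "third_party\cygwin\setup_env.bat" && set CYGWIN=nontsec &&
  #       bash -c "cd ../.. ; 'python' 'foo.py'"
  # i.e. the environment batch file runs first, then the args execute inside
  # cygwin bash with posix-style paths.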
def IsRuleRunUnderCygwin(self, rule):
"""Determine if an action should be run under cygwin. If the variable is
unset, or set to 1 we use cygwin."""
return int(rule.get('msvs_cygwin_shell',
self.spec.get('msvs_cygwin_shell', 1))) != 0
def _HasExplicitRuleForExtension(self, spec, extension):
"""Determine if there's an explicit rule for a particular extension."""
for rule in spec.get('rules', []):
if rule['extension'] == extension:
return True
return False
def _HasExplicitIdlActions(self, spec):
"""Determine if an action should not run midl for .idl files."""
return any([action.get('explicit_idl_action', 0)
for action in spec.get('actions', [])])
def HasExplicitIdlRulesOrActions(self, spec):
"""Determine if there's an explicit rule or action for idl files. When
there isn't we need to generate implicit rules to build MIDL .idl files."""
return (self._HasExplicitRuleForExtension(spec, 'idl') or
self._HasExplicitIdlActions(spec))
def HasExplicitAsmRules(self, spec):
"""Determine if there's an explicit rule for asm files. When there isn't we
need to generate implicit rules to assemble .asm files."""
return self._HasExplicitRuleForExtension(spec, 'asm')
def GetIdlBuildData(self, source, config):
"""Determine the implicit outputs for an idl file. Returns output
directory, outputs, and variables and flags that are required."""
config = self._TargetConfig(config)
midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
def midl(name, default=None):
return self.ConvertVSMacros(midl_get(name, default=default),
config=config)
tlb = midl('TypeLibraryName', default='${root}.tlb')
header = midl('HeaderFileName', default='${root}.h')
dlldata = midl('DLLDataFileName', default='dlldata.c')
iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
proxy = midl('ProxyFileName', default='${root}_p.c')
# Note that .tlb is not included in the outputs as it is not always
# generated depending on the content of the input idl file.
outdir = midl('OutputDirectory', default='')
output = [header, dlldata, iid, proxy]
variables = [('tlb', tlb),
('h', header),
('dlldata', dlldata),
('iid', iid),
('proxy', proxy)]
# TODO(scottmg): Are there configuration settings to set these flags?
target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
return outdir, output, variables, flags
def _LanguageMatchesForPch(source_ext, pch_source_ext):
c_exts = ('.c',)
cc_exts = ('.cc', '.cxx', '.cpp')
return ((source_ext in c_exts and pch_source_ext in c_exts) or
(source_ext in cc_exts and pch_source_ext in cc_exts))
class PrecompiledHeader(object):
"""Helper to generate dependencies and build rules to handle generation of
precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
"""
def __init__(
self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
self.settings = settings
self.config = config
pch_source = self.settings.msvs_precompiled_source[self.config]
self.pch_source = gyp_to_build_path(pch_source)
filename, _ = os.path.splitext(pch_source)
self.output_obj = gyp_to_unique_output(filename + obj_ext).lower()
def _PchHeader(self):
"""Get the header that will appear in an #include line for all source
files."""
return os.path.split(self.settings.msvs_precompiled_header[self.config])[1]
def GetObjDependencies(self, sources, objs, arch):
"""Given a list of sources files and the corresponding object files,
returns a list of the pch files that should be depended upon. The
additional wrapping in the return value is for interface compatibility
with make.py on Mac, and xcode_emulation.py."""
assert arch is None
if not self._PchHeader():
return []
pch_ext = os.path.splitext(self.pch_source)[1]
for source in sources:
if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
return [(None, None, self.output_obj)]
return []
def GetPchBuildCommands(self, arch):
"""Not used on Windows as there are no additional build steps required
(instead, existing steps are modified in GetFlagsModifications below)."""
return []
def GetFlagsModifications(self, input, output, implicit, command,
cflags_c, cflags_cc, expand_special):
"""Get the modified cflags and implicit dependencies that should be used
for the pch compilation step."""
if input == self.pch_source:
pch_output = ['/Yc' + self._PchHeader()]
if command == 'cxx':
return ([('cflags_cc', list(map(expand_special, cflags_cc + pch_output)))],
self.output_obj, [])
elif command == 'cc':
return ([('cflags_c', list(map(expand_special, cflags_c + pch_output)))],
self.output_obj, [])
return [], output, implicit
vs_version = None
def GetVSVersion(generator_flags):
global vs_version
if not vs_version:
vs_version = gyn.MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'),
allow_fallback=False)
return vs_version
def _GetVsvarsSetupArgs(generator_flags, arch):
vs = GetVSVersion(generator_flags)
return vs.SetupScript()
def ExpandMacros(string, expansions):
"""Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
for the canonical way to retrieve a suitable dict."""
if '$' in string:
for old, new in expansions.items():
assert '$(' not in new, new
string = string.replace(old, new)
return string
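# Usage sketch (added comment, not in the original source), using macro keys
# of the same shape as GetVSMacroEnv produces:
#   ExpandMacros('$(OutDir)\\$(ProjectName).dll',
#                {'$(OutDir)\\': 'out\\Release\\', '$(ProjectName)': 'base'})
# returns 'out\\Release\\base.dll'.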
def _ExtractImportantEnvironment(output_of_set):
"""Extracts environment variables required for the toolchain to run from
a textual dump output by the cmd.exe 'set' command."""
envvars_to_save = (
'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
'include',
'lib',
'libpath',
'path',
'pathext',
'systemroot',
'temp',
'tmp',
)
env = {}
for line in output_of_set.splitlines():
for envvar in envvars_to_save:
if re.match(envvar + '=', line.lower()):
var, setting = line.split('=', 1)
if envvar == 'path':
# Our own rules (for running gyp-win-tool) and other actions in
# Chromium rely on python being in the path. Add the path to this
# python here so that if it's not in the path when ninja is run
# later, python will still be found.
setting = os.path.dirname(sys.executable) + os.pathsep + setting
env[var.upper()] = setting
break
for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
if required not in env:
raise Exception('Environment variable "%s" '
'required to be set to valid path' % required)
return env
def _FormatAsEnvironmentBlock(envvar_dict):
"""Format as an 'environment block' directly suitable for CreateProcess.
Briefly this is a list of key=value\0, terminated by an additional \0. See
CreateProcess documentation for more details."""
block = ''
nul = '\0'
for key, value in envvar_dict.items():
block += key + '=' + value + nul
block += nul
return block
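# Illustrative result (added comment, not in the original file): for
#   {'SYSTEMROOT': 'C:\\Windows', 'TMP': 'C:\\Temp'}
# the returned block is
#   'SYSTEMROOT=C:\\Windows\x00TMP=C:\\Temp\x00\x00'
# i.e. key=value pairs separated by NULs with a trailing double NUL, which is
# what CreateProcess expects for an environment block.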
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags,
system_includes, open_out):
"""It's not sufficient to have the absolute path to the compiler, linker,
etc. on Windows, as those tools rely on .dlls being in the PATH. We also
need to support both x86 and x64 compilers within the same build (to support
msvs_target_platform hackery). Different architectures require a different
compiler binary, and different supporting environment variables (INCLUDE,
LIB, LIBPATH). So, we extract the environment here, wrap all invocations
of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
sets up the environment, and then we do not prefix the compiler with
an absolute path, instead preferring something like "cl.exe" in the rule
which will then run whichever the environment setup has put in the path.
When the following procedure to generate environment files does not
meet your requirement (e.g. for custom toolchains), you can pass
"-G ninja_use_custom_environment_files" to the gyp to suppress file
generation and use custom environment files prepared by yourself."""
archs = ('x86', 'x64')
if generator_flags.get('ninja_use_custom_environment_files', 0):
cl_paths = {}
for arch in archs:
cl_paths[arch] = 'cl.exe'
return cl_paths
vs = GetVSVersion(generator_flags)
cl_paths = {}
for arch in archs:
# Extract environment variables for subprocesses.
args = vs.SetupScript(arch)
args.extend(('&&', 'set'))
popen = subprocess.Popen(
args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
variables, _ = popen.communicate()
env = _ExtractImportantEnvironment(variables)
# Inject system includes from gyn files into INCLUDE.
if system_includes:
system_includes = system_includes | OrderedSet(
env.get('INCLUDE', '').split(';'))
env['INCLUDE'] = ';'.join(system_includes)
env_block = _FormatAsEnvironmentBlock(env)
f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'wb')
f.write(env_block)
f.close()
# Find cl.exe location for this architecture.
args = vs.SetupScript(arch)
args.extend(('&&',
'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i'))
popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
output, _ = popen.communicate()
cl_paths[arch] = _ExtractCLPath(output)
return cl_paths
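# Note (added for clarity): the loop above writes one 'environment.x86' /
# 'environment.x64' file into the toplevel build directory (consumed by
# win_tool.py when it wraps compiler/linker invocations, as described in the
# docstring) and returns a dict mapping each arch to the located cl.exe path.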
def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
"""Emulate behavior of msvs_error_on_missing_sources present in the msvs
generator: Check that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation when building via
VS, and we want this check to match for people/bots that build using ninja,
so they're not surprised when the VS build fails."""
if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
no_specials = [x for x in sources if '$' not in x]
relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
missing = [x for x in relative if not os.path.exists(x)]
if missing:
# They'll look like out\Release\..\..\stuff\things.cc, so normalize the
# path for a slightly less crazy looking output.
cleaned_up = [os.path.normpath(x) for x in missing]
raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
# Sets some values in default_variables, which are required for many
# generators, run on Windows.
def CalculateCommonVariables(default_variables, params):
generator_flags = params.get('generator_flags', {})
# Set a variable so conditions can be based on msvs_version.
msvs_version = gyn.msvs_emulation.GetVSVersion(generator_flags)
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running thru WOW64).
if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
| bsd-3-clause | -70,303,189,702,091,310 | 42.88208 | 83 | 0.657921 | false |
Tjorriemorrie/trading | 09_scalping/s-waitfor.py | 1 | 2508 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import linear_model
from features import calculateTargets
from simulate import backtest
from pprint import pprint
currency = 'EURUSD'
interval = '60'
factor = 10000
df = pd.read_csv(
r'../data/' + currency.upper() + interval + '.csv',
names=['date', 'time', 'open', 'high', 'low', 'close', 'volume'],
dtype={'open': 'float', 'high': 'float', 'low': 'float', 'close': 'float', 'volume': 'int'},
#parse_dates=[[0, 1]],
# index_col=0,
)
df = df.iloc[-14400:].reset_index()
print df.tail()
print 'calculating targets...'
calculateTargets(df)
#bullMean = df['targetBull'].mean()
#bullStd = df['targetBull'].std()
#print 'high mean std', bullMean, bullStd
bearMean = df['targetBear'].mean()
bearStd = df['targetBear'].std()
print 'bear mean std', bearMean, bearStd
print 'backtesting...'
takeProfit = 500.
stopLoss = 540.
entry = 20.
waitFors = [wf + 0. for wf in range(10, 190, 10)]
exitAt = 530.
totalNpt = []
totalRat = []
for waitFor in waitFors:
print '\nwait for', waitFor
wins, losses = backtest(df, takeProfit, stopLoss, entry, waitFor, exitAt, factor)
profit = sum([w['outcome'] for w in wins])
print 'wins', len(wins), round(profit, 4)
loss = sum([l['outcome'] for l in losses])
print 'loss', len(losses), round(loss, 4)
net = profit - loss
npt = int((net / len(wins + losses)) * factor)
ratio = len(wins) / (len(wins) + len(losses) + 0.)
print 'net', round(net, 4), 'npt', npt, '%', int(ratio * 100)
totalNpt.append(net)
totalRat.append(ratio)
print '\n'
#pprint(totalNpt)
N = len(totalNpt)
#totalNpt = (20, 35, 30, 35, 27)
#menStd = (2, 3, 4, 1, 2)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(ind, totalNpt, width, color='r')
#womenMeans = (25, 32, 34, 20, 25)
#womenStd = (3, 5, 2, 3, 3)
rects2 = ax.bar(ind+width, totalRat, width, color='y')
# add some text for labels, title and axes ticks
ax.set_ylabel('Pips')
ax.set_title('Results')
ax.set_xticks(ind + width)
ax.set_xticklabels(map(int, waitFors))
#ax.legend(
# (rects1[0]),
# ('Npt',),
#)
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height),
ha='center', va='bottom')
#autolabel(rects1)
#autolabel(rects2)
plt.show()
| mit | 3,768,308,975,932,503,600 | 25.125 | 96 | 0.629984 | false |
akrherz/iem | htdocs/plotting/auto/scripts100/p139.py | 1 | 6185 | """Top 10 largest, smallest"""
import datetime
try:
from zoneinfo import ZoneInfo
except ImportError:
from backports.zoneinfo import ZoneInfo
from pandas.io.sql import read_sql
from matplotlib.font_manager import FontProperties
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.plot import figure
from pyiem.exceptions import NoDataFound
MDICT = {
"all": "No Month/Time Limit",
"spring": "Spring (MAM)",
"fall": "Fall (SON)",
"winter": "Winter (DJF)",
"summer": "Summer (JJA)",
"jan": "January",
"feb": "February",
"mar": "March",
"apr": "April",
"may": "May",
"jun": "June",
"jul": "July",
"aug": "August",
"sep": "September",
"oct": "October",
"nov": "November",
"dec": "December",
}
PDICT = {"largest": "Largest", "smallest": "Smallest"}
def get_description():
"""Return a dict describing how to call this plotter"""
desc = dict()
desc["data"] = True
desc["cache"] = 86400
desc[
"description"
] = """This table presents the 10 largest or smallest differences
between the lowest and highest air temperature for a local calendar
day. Some stations have auxillary products that provide 'daily' values
over a date defined always in standard time. This plot also presents
sprites of the temperature time series starting at 12 hours before the
denoted date and ending at 12 hours after the date. The sprite often
quickly points out bad data points, sigh, but also easily shows if the
temperature change was an increase during the day or decrease."""
desc["arguments"] = [
dict(
type="zstation",
name="zstation",
default="AMW",
label="Select Station:",
network="IA_ASOS",
),
dict(
type="select",
name="v",
default="largest",
label="Show largest or smallest differences?",
options=PDICT,
),
dict(
type="select",
name="month",
default="all",
label="Month Limiter",
options=MDICT,
),
]
return desc
def plot_date(dbconn, ax, i, date, station, tz):
"""plot date."""
# request 36 hours
sts = datetime.datetime(date.year, date.month, date.day, tzinfo=tz)
sts = sts - datetime.timedelta(hours=12)
ets = sts + datetime.timedelta(hours=48)
df = read_sql(
"SELECT valid at time zone 'UTC' as valid, tmpf from alldata "
"where station = %s and "
"valid >= %s and valid <= %s and tmpf is not null ORDER by valid ASC",
dbconn,
params=(station, sts, ets),
index_col=None,
)
if df.empty:
return
df["valid"] = df["valid"].dt.tz_localize(ZoneInfo("UTC"))
df["norm"] = (df["tmpf"] - df["tmpf"].min()) / (
df["tmpf"].max() - df["tmpf"].min()
)
df["xnorm"] = [
x.total_seconds() for x in (df["valid"].dt.to_pydatetime() - sts)
]
lp = ax.plot(df["xnorm"], df["norm"] + i)
ax.text(
df["xnorm"].values[-1],
df["norm"].values[-1] + i,
date.strftime("%-d %b %Y"),
va="center",
color=lp[0].get_color(),
)
def plotter(fdict):
"""Go"""
font0 = FontProperties()
font0.set_family("monospace")
font0.set_size(16)
pgconn = get_dbconn("iem")
ctx = get_autoplot_context(fdict, get_description())
station = ctx["zstation"]
month = ctx["month"]
if month == "all":
months = range(1, 13)
elif month == "fall":
months = [9, 10, 11]
elif month == "winter":
months = [12, 1, 2]
elif month == "spring":
months = [3, 4, 5]
elif month == "summer":
months = [6, 7, 8]
else:
ts = datetime.datetime.strptime("2000-" + month + "-01", "%Y-%b-%d")
# make sure it is length two for the trick below in SQL
months = [ts.month, 999]
order = "DESC" if ctx["v"] == "largest" else "ASC"
df = read_sql(
f"""
SELECT day as date, max_tmpf as max, min_tmpf as min,
max_tmpf::int - min_tmpf::int as difference
from summary s JOIN stations t on (s.iemid = t.iemid)
where t.id = %s and t.network = %s
and extract(month from day) in %s
and max_tmpf is not null and min_tmpf is not null
ORDER by difference {order}, date DESC LIMIT 10
""",
pgconn,
params=(station, ctx["network"], tuple(months)),
parse_dates=("date",),
index_col=None,
)
if df.empty:
raise NoDataFound("No Data Found,")
df["rank"] = df["difference"].rank(
ascending=(ctx["v"] == "smallest"), method="min"
)
ab = ctx["_nt"].sts[station]["archive_begin"]
if ab is None:
raise NoDataFound("Unknown station metadata.")
tz = ZoneInfo(ctx["_nt"].sts[station]["tzname"])
title = (
"%s [%s] %s-%s\n"
"Top 10 %s Local Calendar Day [%s] Temperature Differences"
) % (
ctx["_nt"].sts[station]["name"],
station,
ab.year,
datetime.date.today().year,
PDICT[ctx["v"]],
month.capitalize(),
)
fig = figure(title=title)
fig.text(
0.1, 0.81, " # Date Diff Low High", fontproperties=font0
)
y = 0.74
ax = fig.add_axes([0.5, 0.1, 0.3, 0.69])
i = 10
dbconn = get_dbconn("asos")
for _, row in df.iterrows():
fig.text(
0.1,
y,
("%2.0f %11s %3.0f %3.0f %3.0f")
% (
row["rank"],
row["date"].strftime("%d %b %Y"),
row["difference"],
row["min"],
row["max"],
),
fontproperties=font0,
)
plot_date(dbconn, ax, i, row["date"], station, tz)
y -= 0.07
i -= 1
ax.set_title("Hourly Temps On Date & +/-12 Hrs")
ax.set_ylim(1, 11)
ax.axvline(12 * 3600, color="tan")
ax.axvline(36 * 3600, color="tan")
ax.axis("off")
return fig, df
if __name__ == "__main__":
plotter(dict(zstation="MCW", network="IA_ASOS"))
| mit | -8,857,009,840,062,342,000 | 28.735577 | 78 | 0.538238 | false |
ololobster/cvidone | tests/db_with_data/fill.py | 1 | 8007 | # Licensed under The MIT License.
# For full copyright and license information, please see the LICENSE.txt.
import os
os.environ["CONF_DIR"] = os.path.join(
os.path.dirname(os.path.realpath(__file__))
, ".."
, ".."
, "etc"
)
from cvidone import settings
from cvidone.model.user import User
from cvidone.model.section_instance import SectionInstance
from cvidone.model.label import Label
from cvidone.model.task import Task
from cvidone.model.trigger import Trigger
from cvidone.model.periodicity import Periodicity
# Libs.
from datetime import datetime, timedelta
db = settings.getDB()
def createTasks(section, raw_tasks, parent_task=None):
for raw_task in reversed(raw_tasks):
task = Task.createEmptyInstance()
task.section = section
task.name = raw_task["name"]
if ("deadline" in raw_task):
task.deadline = raw_task["deadline"]
if (parent_task is not None):
task.parent = parent_task
task.completed = raw_task["completed"]
task.save(db)
if ("tasks" in raw_task):
createTasks(section, raw_task["tasks"], task)
def createALotOfTasks(section):
for i in reversed(range(1, 2001)):
task_on_1st_level = Task.createEmptyInstance()
task_on_1st_level.section = section
task_on_1st_level.name = "task %i" % (i)
task_on_1st_level.save(db)
if (0 == (i % 10)):
for j in reversed(range(1, 11)):
task_on_2nd_level = Task.createEmptyInstance()
task_on_2nd_level.section = section
task_on_2nd_level.name = "task %i.%i" % (i, j)
task_on_2nd_level.parent = task_on_1st_level
task_on_2nd_level.save(db)
if (0 == (j % 5)):
for k in reversed(range(1, 4)):
task_on_3rd_level = Task.createEmptyInstance()
task_on_3rd_level.section = section
task_on_3rd_level.name = "task %i.%i.%i" % (i, j, k)
task_on_3rd_level.parent = task_on_2nd_level
task_on_3rd_level.save(db)
def createTriggers(section, raw_triggers):
for raw_trigger in raw_triggers:
trigger = Trigger.getNew(section)
trigger.name = raw_trigger["name"]
trigger.periodicity = raw_trigger["periodicity"]
trigger.save(db)
def createSections(user, raw_sections, parent_si=None):
for raw_section in reversed(raw_sections):
si = SectionInstance.createEmptyInstance(user)
si.name = raw_section["name"]
si.comment = "OLOLO"
if (parent_si is not None):
si.parent = parent_si
si.save(db)
if ("sections" in raw_section):
createSections(user, raw_section["sections"], si)
if ("tasks" in raw_section):
createTasks(si.section, raw_section["tasks"])
if ("triggers" in raw_section):
createTriggers(si.section, raw_section["triggers"])
def createLabels(user, raw_labels, parent_label=None):
for raw_label in reversed(raw_labels):
label = Label.createEmptyInstance(user)
label.name = raw_label["name"]
label.comment = "kekeke"
if (parent_label is not None):
label.parent = parent_label
label.save(db)
if ("labels" in raw_label):
createLabels(user, raw_label["labels"], label)
def createUser(email, pwd, raw_sections, raw_labels=None):
user = User()
user.status = "active"
user.email = email
user.setPassword(pwd)
user.locale = "en"
user.time_zone = "Asia/Calcutta"
user.save(db)
if (raw_labels is not None):
createLabels(user, raw_labels)
createSections(user, raw_sections)
return user
user1 = createUser(
email="[email protected]"
, pwd="qwerty"
, raw_sections=(
{
"name": "TV"
, "sections": (
{
"name": "movies"
, "tasks": (
{"name": "Chinatown (1974)", "completed": False, "deadline": datetime.utcnow()}
, {"name": "Moon (2009)", "completed": False, "deadline": datetime.utcnow() + timedelta(days=1)}
, {"name": "Rope (1948)", "completed": False, "deadline": datetime.utcnow() + timedelta(days=2)}
, {"name": "Death Proof (2007)", "completed": False, "deadline": datetime.utcnow() + timedelta(days=3)}
, {"name": "Memories of Murder (2003)", "completed": False, "deadline": datetime.utcnow() + timedelta(days=4)}
)
}
, {
"name": "serials"
, "tasks": (
{
"name": "Rome"
, "completed": False
, "tasks": (
{"name": "season 1", "completed": True}
, {"name": "season 2", "completed": False}
)
}
, {"name": "Brotherhood of War", "completed": True}
)
}
, {
"name": "documental"
, "tasks": (
{"name": "1", "completed": False}
, {"name": "1", "completed": False}
, {"name": "1", "completed": False}
, {"name": "1", "completed": False}
)
}
)
}
, {
"name": "books"
, "tasks": (
{"name": "Financier - Theodore Driser", "completed": False}
, {"name": "Grapes of Wrath", "completed": False}
)
}
, {
"name": "GYM"
, "tasks": (
{"name": "GYM", "completed": True}
, {"name": "GYM", "completed": True}
)
, "triggers": (
{"name": "GYM day", "periodicity": Periodicity(period="day")}
, {"name": "GYM week", "periodicity": Periodicity(period="week", days=[2, 6])}
, {"name": "GYM month", "periodicity": Periodicity(period="month", days=[4, 7])}
, {"name": "GYM year", "periodicity": Periodicity(period="year", dates=["01.06", "01.12"])}
)
}
, {
"name": "y"
}
)
, raw_labels=(
{
"name": "writers"
, "labels": (
{"name": "Theodore Dreiser"}
, {"name": "John Steinbeck"}
, {"name": "Erich Maria Remarque"}
, {"name": "George R. R. Martin"}
)
}
, {
"name": "contacts"
, "labels": (
{"name": "Robb Stark"}
, {"name": "Tyrion Lannister"}
, {"name": "Gregor Clegane"}
, {"name": "Stannis Baratheon"}
, {"name": "Daenerys Targaryen"}
)
}
)
)
user2 = createUser(
email="[email protected]"
, pwd="qwerty"
, raw_sections=(
{
"name": "my section"
, "tasks": (
{"name": "perform the Last Judgement", "completed": False}
, {"name": "send Jesus to Earth", "completed": True}
, {"name": "make the Great Flood", "completed": True}
)
},
)
)
# Shared section.
si_id = db.selectOne("SELECT id FROM sections_instances WHERE (name = 'movies') LIMIT 1")
si1 = SectionInstance.getExisting(db, si_id)
si2 = SectionInstance.createEmptyInstance(user2)
si2.name = "shared section"
si2.section = si1.section
si2.save(db)
# Section with a lot of tasks.
si_with_a_lot_of_tasks = SectionInstance.createEmptyInstance(user1)
si_with_a_lot_of_tasks.name = "section with a lot of tasks"
si_with_a_lot_of_tasks.save(db)
createALotOfTasks(si_with_a_lot_of_tasks.section)
| mit | -5,838,180,522,105,701,000 | 32.927966 | 132 | 0.504309 | false |
Konubinix/weboob | modules/arte/pages.py | 1 | 3135 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.deprecated.browser import Page
from weboob.tools.html import html2text
from weboob.capabilities import NotAvailable
from weboob.capabilities.image import BaseImage
from weboob.capabilities.collection import Collection
from .video import ArteLiveVideo
class ArteLiveVideoPage(Page):
def get_video(self, video=None):
if not video:
video = ArteLiveVideo(self.group_dict['id'])
div = self.document.xpath('//div[@class="bloc-presentation"]')[0]
description = self.parser.select(div,
'div[@class="field field-name-body field-type-text-with-summary field-label-hidden bloc-rte"]',
1,
method='xpath')
video.description = html2text(self.parser.tostring(description))
json_url = self.document.xpath('//div[@class="video-container"]')[0].attrib['arte_vp_url']
return json_url, video
class ArteLivePage(Page):
def iter_resources(self):
items = list()
for el in self.document.xpath('//ul[@class="filter-liste"]/li'):
_id = el.attrib['data-target'].replace('video_box_tab_','')
text = self.parser.select(el, 'a/span', 1, method='xpath').text
item = Collection([u'arte-live', u'%s' % _id], u'%s' % (text))
items.append(item)
return items
def iter_videos(self, cat, lang='fr'):
articles = self.document.xpath('//div[@id="video_box_tab_%s"]/article' % cat)
videos = list()
for article in articles:
_id = article.attrib['about']
title = self.parser.select(article,
'div/div[@class="info-article "]/div/h3/a',
1,
method='xpath').text
thumbnail = self.parser.select(article,
'div/div/a/figure/span/span',
1,
method='xpath').attrib['data-src']
video = ArteLiveVideo(_id)
video.title = u'%s' % title
video.thumbnail = BaseImage(thumbnail)
video.thumbnail.url = video.thumbnail.id
video.set_empty_fields(NotAvailable, ('url',))
videos.append(video)
return videos
| agpl-3.0 | -6,551,692,988,662,806,000 | 40.25 | 136 | 0.588198 | false |
vadim-ivlev/STUDY | coding/TwoSum.py | 1 | 1170 | # import pytest
'''
https://www.testdome.com/questions/python/two-sum/14289?questionIds=14288,14289&generatorId=92&type=fromtest&testDifficulty=Easy
Write a function that, given a list and a target sum, returns zero-based indices of any two distinct elements whose sum is equal to the target sum. If there are no such elements, the function should return (-1, -1).
For example, find_two_sum([1, 3, 5, 7, 9], 12) should return a tuple containing any of the following pairs of indices:
1 and 4 (3 + 9 = 12)
2 and 3 (5 + 7 = 12)
3 and 2 (7 + 5 = 12)
4 and 1 (9 + 3 = 12)
'''
def find_index(m,a):
try:
return a.index(m)
except :
return -1
def find_two_sum(a, s):
'''
>>> (3, 4) == find_two_sum([1, 3, 5, 7, 9], 12)
True
'''
if len(a)<2:
return (-1,-1)
idx = dict( (v,i) for i,v in enumerate(a) )
for i in a:
m = s - i
# k = find_index(m,a)
k = idx.get(m,-1)
if k != -1 :
return (i,k)
return (-1, -1)
print(find_two_sum([1, 3, 5, 7, 9], 12))
if __name__ == '__main__':
import doctest; doctest.testmod()
# pytest.main()
# pass | mit | -3,817,025,658,587,270,000 | 21.519231 | 215 | 0.559829 | false |
DanSearle/CheckMail | checkmail.py | 1 | 4194 | #!/usr/bin/env python
"""
Simple Python module to parse a Thunderbird mail file and
scan each email message with ClamAV in order to detect
suspect messages.
"""
import pyclamd
import argparse
import mailbox
import logging
log = logging.getLogger(__name__)
default_net = ("localhost", 3310)
class ScanMessage:
def __init__(self, key, message):
self._message = message
self.key = key
self.signature = None
self._scan()
def _scan(self):
log.debug("Scanning message {0}".format(self))
result = pyclamd.scan_stream(self._message.as_string())
log.debug("Scanned message {0}, result {1}".format(self, result))
if result:
self.signature = result["stream"]
def __repr__(self):
message = self._message
return "From: {0}, Subject: {1}, Signature: {2}".format(message["From"],
message["Subject"],
self.signature)
def parse_command_line():
class HostPortAction(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super(HostPortAction, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
values = values.split(":")
if len(values) == 1:
values.append(3310)
else:
values[1] = int(values[1])
setattr(namespace, self.dest, tuple(values))
parser = argparse.ArgumentParser()
parser.add_argument('mailfile', nargs='+', type=argparse.FileType('r'),
help="mbox mail file to parse, must be a file and not stdin")
parser.add_argument('-c', '--clean', action="store_true",
help="Set to automatically remove messages which have detected viruses")
parser.add_argument("-v", "--verbose", dest="verbose_count",
action="count", default=0,
help="increases log verbosity for each occurence.")
group = parser.add_argument_group('Clamd Connection')
group_ex = group.add_mutually_exclusive_group()
group_ex.add_argument('-s', '--socket', metavar="SOCKET", type=str,
default="/var/run/clamav/clamd.ctl",
help="Socket file to contact clamd")
group_ex.add_argument('-n', '--network', metavar="HOST:PORT", type=str, action=HostPortAction,
default=default_net,
help="Host and port to contact clamd, e.g. localhost:3310")
arguments = parser.parse_args()
logging.basicConfig(level=max(2 - arguments.verbose_count, 0) * 10)
return arguments
if __name__ == "__main__":
args = parse_command_line()
if args.network == default_net:
try:
pyclamd.init_unix_socket(args.socket)
except:
pyclamd.init_network_socket(args.network[0], args.network[1])
else:
pyclamd.init_network_socket(args.network[0], args.network[1])
for filename in args.mailfile:
log.debug("Reading mboxfile {0}".format(filename.name))
mbox = mailbox.mbox(filename.name)
log.debug("Loaded mboxfile {0}".format(filename.name))
if args.clean:
log.debug("Locking mailbox {0}".format(filename.name))
mbox.lock()
try:
virus_mail = (y for y in (ScanMessage(key, message) for key, message in mbox.iteritems()) if y.signature)
for v in virus_mail:
log.info("Found virus in message {0}".format(v))
if args.clean:
log.debug("Cleaning {0} from mailbox {1}".format(v, filename.name))
mbox.remove(v.key)
log.info("Message {0} removed".format(v.key))
finally:
log.debug("Flushing mailbox {0}".format(filename.name))
mbox.flush()
log.debug("Closing mailbox {0}".format(filename.name))
mbox.close()
| mit | -2,563,605,324,226,916,000 | 38.942857 | 117 | 0.565331 | false |
simos/keyboardlayouteditor | Key.py | 1 | 8239 | #!/usr/bin/env python
# -*- encoding: UTF-8 -*-
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import gtk
import gobject
import Common
import DumbKey
import KeyValue
from KeyDict import KeyDict
class Key(gtk.EventBox):
TARGET_TYPE_TEXT = 80
__toKey__ = [ ( "text/plain", 0, TARGET_TYPE_TEXT ) ]
def __init__(self, size = 1, keycode = None, vertical = False,
keytype = Common.keytypes.SIMPLE,
level1 = '', level2 = '', level3 = '', level4 = ''):
gtk.EventBox.__init__(self)
self.keycode = keycode
#print "Key: Invoked __init__(), level1:", level1
self.key = DumbKey.DumbKey(size, keycode, vertical, keytype,
level1, level2, level3, level4)
self.add(self.key)
self.set_events(gtk.gdk.BUTTON_PRESS_MASK)
self.connect("button_press_event", self.button_press_event_callback)
self.connect("button_release_event", self.button_release_event_callback)
# Initialise the context menu
self.context_menu = gtk.Menu()
self.context_menu_item = gtk.MenuItem("Remove")
self.context_menu.add(self.context_menu_item)
self.context_menu_item.connect("activate", self.menu_item_activate_callback,
"remove")
self.context_menu.connect("selection-done", self.menu_selection_done_callback)
self.context_menu.show_all()
# Adding DnD support
if self.keycode not in KeyDict.IgnoreKeys:
self.connect("drag_data_received", self.drag_data_get_callback)
self.drag_dest_set(gtk.DEST_DEFAULT_MOTION |
gtk.DEST_DEFAULT_HIGHLIGHT |
gtk.DEST_DEFAULT_DROP,
self.__toKey__, gtk.gdk.ACTION_COPY)
self.add_events(self.get_events() | gtk.gdk.EXPOSURE_MASK
| gtk.gdk.BUTTON1_MOTION_MASK | gtk.gdk.BUTTON_PRESS_MASK
| gtk.gdk.POINTER_MOTION_MASK | gtk.gdk.DRAG_MOTION
| gtk.gdk.DROP_FINISHED | gtk.gdk.DRAG_STATUS | gtk.gdk.ENTER_NOTIFY
| gtk.gdk.DRAG_ENTER)
# self.connect("motion-notify-event", self.mouse_move_signal)
self.connect("enter_notify_event", self.enter_notify_callback)
self.connect("leave_notify_event", self.leave_notify_callback)
self.connect("drag_drop", self.drag_drop_callback)
self.connect("drag_motion", self.drag_motion_callback)
self.connect("drag_leave", self.drag_leave_callback)
self.tooltips = gtk.Tooltips()
self.tooltips.set_tip(self, "Keycode: " + self.keycode)
def drag_data_get_callback(self, widget, context, x, y, selection, targetType, time):
#print "Callback drag_data_get: Received a callback for '%(str)s', segment: %(s)d at %(x)d, %(y)d" % \
# { "s": self.key.pending["keysegment"], "str": selection.data.decode('utf-8'), "x": x, "y": y }
if selection.data[0] == '\\' and \
(selection.data[1] == 'u' or selection.data[1] == 'U'):
newval = selection.data.decode('unicode-escape')
else:
newval = selection.data
self.key.pending["ispending"] = True
self.key.pending["value"] = newval
#print "drag_data_get"
#print "self.key.pending[\"keysegment\"]:", self.key.pending["keysegment"]
self.key.keyvalues[self.key.pending["keysegment"]].add(newval)
self.key.extract_display_keyvalues()
self.key.redraw()
self.set_tooltip()
Common.addtostatusbar('Added ' + newval + ' to key ' + self.keycode + \
', at level ' + str(self.key.pending["keysegment"]))
def mouse_move_callback(self, widget, event):
pass
def enter_notify_callback(self, widget, event):
#print "enter_notify"
self.key.do_highlight(True)
self.set_tooltip()
def leave_notify_callback(self, widget, event):
#self.key.infowin.hide()
#print "leave_notify"
self.key.do_highlight(False, event.x, event.y)
def drag_drop_callback(self, widget, drag_context, x, y, timestamp):
# print "drag_drop"
pass
def drag_motion_callback(self, widget, drag_context, x, y, timestamp):
#print "drag_motion"
self.key.highlight = True
self.key.do_highlight(True, x, y)
self.key.pending["keysegment"] = self.find_highlighted_segment(x, y)
def drag_leave_callback(self, widget, drag_context, timestamp):
#print "drag_leave"
self.key.highlight = False
self.key.do_highlight(False)
def button_press_event_callback(self, widget, event):
if self.keycode not in KeyDict.IgnoreKeys:
if (event.button == 3):
self.key.popup_highlight = True
self.context_menu.popup(None, None, None, event.button, event.time)
self.key.pending["keysegment"] = self.find_highlighted_segment(event.x, event.y)
# Tell calling code that we have handled this event.
return True
# Tell calling code we have not handled this code; pass it on
return False
def button_release_event_callback(self, widget, event):
self.key.popup_highlight = False
def menu_selection_done_callback(self, menushell):
""" Dehighlight highlighted segment """
self.key.popup_highlight = False
self.key.redraw()
def menu_item_activate_callback(self, menuitem, action):
if action == "remove":
self.key.keyvalues[self.key.pending["keysegment"]].add('')
self.key.extract_display_keyvalues()
self.set_tooltip()
self.key.redraw()
def myname(self):
return "[%(k1)s, %(k2)s, %(k3)s, %(k4)s]" % \
{ "k1": self.key.keyvalues[Common.keysegments.ONE].getValue(),
"k2": self.key.keyvalues[Common.keysegments.TWO].getValue(),
"k3": self.key.keyvalues[Common.keysegments.THREE].getValue(),
"k4": self.key.keyvalues[Common.keysegments.FOUR].getValue()
}
def find_highlighted_segment(self, x, y):
dummy, dummy, width, height = self.get_allocation()
#print "find:", width, height, x, y
if x != -1 and y != -1:
if x <= width/2:
if y <= height/2:
return Common.keysegments.TWO
else:
return Common.keysegments.ONE
elif y <= height/2:
return Common.keysegments.FOUR
else:
return Common.keysegments.THREE
else:
return Common.keysegments.NONE
def set_tooltip(self):
tooltip_string = 'Keycode: ' + self.keycode
counter_empty = 0
for counter in Common.keysegmentslist:
if self.key.dvalues[counter].getType() == Common.keyvaluetype.NOSYMBOL:
counter_empty +=1
if counter_empty < len(Common.keysegmentslist):
for counter in Common.keysegmentslist:
tooltip_string += '\n' + str(counter) + '. ' +\
str(self.key.dvalues[counter].getValue()) + ' ' +\
self.key.dvalues[counter].getPValue()
self.tooltips.set_tip(self, tooltip_string)
| gpl-3.0 | -7,022,255,439,730,606,000 | 43.777174 | 121 | 0.577497 | false |
eerimoq/asn1tools | examples/benchmarks/question/question.py | 1 | 2893 | #!/usr/bin/env python
"""Encoding and decoding of a question once for each codec.
Example execution:
$ ./question.py
ASN.1 specification:
-- A simple protocol taken from Wikipedia.
Foo DEFINITIONS ::= BEGIN
Question ::= SEQUENCE {
id INTEGER,
question IA5String
}
Answer ::= SEQUENCE {
id INTEGER,
answer BOOLEAN
}
END
Question to encode: {'id': 1, 'question': 'Is 1+1=3?'}
BER:
Encoded: 300e0201011609497320312b313d333f (16 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
DER:
Encoded: 300e0201011609497320312b313d333f (16 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
JER:
Encoded: 7b226964223a312c227175657374696f6e223a22497320312b313d333f227d (31 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
OER:
Encoded: 010109497320312b313d333f (12 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
PER:
Encoded: 010109497320312b313d333f (12 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
UPER:
Encoded: 01010993cd03156c5eb37e (11 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
XER:
Encoded: 3c5175657374696f6e3e3c69643e313c2f69643e3c7175657374696f6e3e497320312b313d333f3c2f7175657374696f6e3e3c2f5175657374696f6e3e (61 bytes)
Decoded: {'id': 1, 'question': 'Is 1+1=3?'}
Protocol Buffers:
Encoded: 08011209497320312b313d333f (13 bytes)
Decoded:
id: 1
question: "Is 1+1=3?"
$
"""
from __future__ import print_function
import os
from binascii import hexlify
import asn1tools
from foo_pb2 import Question
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
FOO_ASN_PATH = os.path.join(SCRIPT_DIR,
'..',
'..',
'..',
'tests',
'files',
'foo.asn')
# Print the specification.
print('ASN.1 specification:')
print()
with open(FOO_ASN_PATH) as fin:
print(fin.read())
# The question to encode.
question = {'id': 1, 'question': 'Is 1+1=3?'}
print("Question to encode:", question)
# Encode and decode the question once for each codec.
for codec in ['ber', 'der', 'jer', 'oer', 'per', 'uper', 'xer']:
foo = asn1tools.compile_files(FOO_ASN_PATH, codec)
encoded = foo.encode('Question', question)
decoded = foo.decode('Question', encoded)
print()
print('{}:'.format(codec.upper()))
print('Encoded: {} ({} bytes)'.format(hexlify(encoded).decode('ascii'),
len(encoded)))
print('Decoded:', decoded)
# Also encode using protocol buffers.
question = Question()
question.id = 1
question.question = 'Is 1+1=3?'
encoded = question.SerializeToString()
decoded = question
print()
print('Protocol Buffers:')
print('Encoded: {} ({} bytes)'.format(hexlify(encoded).decode('ascii'),
len(encoded)))
print('Decoded:')
print(decoded)
| mit | 7,377,335,358,063,260,000 | 23.516949 | 142 | 0.6215 | false |
andela-akhenda/maisha-goals | app/__init__.py | 1 | 1333 | import os
from flask import Flask, request, g
from flask_sqlalchemy import SQLAlchemy
from .decorators import json
db = SQLAlchemy()
def create_app(config_name):
""" Create the usual Flask application instance."""
app = Flask(__name__)
# Apply configuration
cfg = os.path.join(os.getcwd(), 'config', config_name + '.py')
app.config.from_pyfile(cfg)
# initialize extensions
db.init_app(app)
# register blueprints
from .api_v1 import api as api_blueprint
app.register_blueprint(api_blueprint, url_prefix='/api/v1')
# authentication token route
from .auth import auth
from .models import User
@app.route('/api/v1', methods=['GET'])
@json
def api_index():
return {
"message": "Welcome to Maisha Goals. Register a new "
" user or login to get started"}
@app.route('/auth/register', methods=['POST'])
@json
def register_user():
u = User()
u.import_data(request.json)
db.session.add(u)
db.session.commit()
return {
'message': 'Your account has been successfuly created'
}, 201, {'Location': u.get_url()}
@app.route('/auth/login')
@auth.login_required
@json
def login_user():
return {'token': g.user.generate_auth_token()}
return app
| mit | 2,195,087,826,035,797,000 | 24.634615 | 66 | 0.613653 | false |
wehlutyk/brainscopypaste | brainscopypaste/filter.py | 1 | 9091 | """Filter clusters and quotes to clean to MemeTracker dataset.
This module defines the :class:`ClusterFilterMixin` mixin which adds filtering
capabilities to :class:`~.db.Cluster`, and the :func:`filter_clusters` function
which uses that mixin to filter the whole MemeTracker dataset. A few other
utility functions are also defined.
"""
from datetime import timedelta
import logging
import click
from progressbar import ProgressBar
from sqlalchemy import func
import numpy as np
from brainscopypaste.utils import (langdetect, session_scope, execute_raw,
memoized)
from brainscopypaste.conf import settings
logger = logging.getLogger(__name__)
class AlreadyFiltered(Exception):
"""Exception raised when trying to filter a dataset that has already been
filtered."""
def filter_clusters(limit=None):
"""Filter the whole MemeTracker dataset by copying all valid
:class:`~.db.Cluster`\ s and :class:`~.db.Quote`\ s and setting their
`filtered` attributes to `True`.
Iterate through all the MemeTracker :class:`~.db.Cluster`\ s, and filter
each of them to see if it's worth keeping. If a :class:`~.db.Cluster` is to
be kept, the function creates a copy of it and all of its kept
:class:`~.db.Quote`\ s, marking them as filtered. Progress of this
operation is printed to stdout.
Once the operation finishes, a VACUUM and an ANALYZE operation are run on
the database so that it recomputes its optimisations.
Parameters
----------
limit : int, optional
If not `None`, stop filtering after `limit` clusters have been seen
(useful for testing purposes).
Raises
------
AlreadyFiltered
If there are already some filtered :class:`~.db.Cluster`\ s or
:class:`~.db.Quote`\ s stored in the database (indicating another
filtering operation has already been completed, or started and
aborted).
"""
from brainscopypaste.db import Session, Cluster, save_by_copy
logger.info('Filtering memetracker clusters')
if limit is not None:
logger.info('Filtering is limited to %s clusters', limit)
click.echo('Filtering all clusters{}...'
.format('' if limit is None else ' (limit={})'.format(limit)))
# Check this isn't already done.
with session_scope() as session:
if session.query(Cluster)\
.filter(Cluster.filtered.is_(True)).count() > 0:
raise AlreadyFiltered('There are already some filtered '
'clusters, aborting.')
query = session.query(Cluster.id)
if limit is not None:
query = query.limit(limit)
cluster_ids = [id for (id,) in query]
logger.info('Got %s clusters to filter', len(cluster_ids))
# Filter.
objects = {'clusters': [], 'quotes': []}
for cluster_id in ProgressBar()(cluster_ids):
with session_scope() as session:
cluster = session.query(Cluster).get(cluster_id)
fcluster = cluster.filter()
if fcluster is not None:
logger.debug('Cluster #%s is kept with %s quotes',
cluster.sid, fcluster.size)
objects['clusters'].append(fcluster)
objects['quotes'].extend(fcluster.quotes)
else:
logger.debug('Cluster #%s is dropped', cluster.sid)
click.secho('OK', fg='green', bold=True)
logger.info('Kept %s clusters and %s quotes after filtering',
len(objects['clusters']), len(objects['quotes']))
# Save.
logger.info('Saving filtered clusters to database')
save_by_copy(**objects)
# Vacuum analyze.
logger.info('Vacuuming and analyzing database')
click.echo('Vacuuming and analyzing... ', nl=False)
execute_raw(Session.kw['bind'], 'VACUUM ANALYZE')
click.secho('OK', fg='green', bold=True)
def _top_id(id):
"""Get the smallest power of ten three orders of magnitude greater than
`id`.
Used to compute :func:`filter_cluster_offset` and
:func:`filter_quote_offset`.
"""
return int(10 ** (np.floor(np.log10(id)) + 3))
@memoized
def filter_cluster_offset():
"""Get the offset to add to filtered :class:`~.db.Cluster` ids.
A filtered :class:`~.db.Cluster`'s id will be its original
:class:`~.db.Cluster`'s id plus this offset. The function is
:func:`~.utils.memoized` since it is called so often.
"""
from brainscopypaste.db import Cluster
with session_scope() as session:
maxid = session.query(func.max(Cluster.id)).scalar()
return _top_id(maxid)
@memoized
def filter_quote_offset():
"""Get the offset to add to filtered :class:`~.db.Quote` ids.
A filtered :class:`~.db.Quote`'s id will be its original
:class:`~.db.Quote`'s id plus this offset. The function is
:func:`~.utils.memoized` since it is called so often.
"""
from brainscopypaste.db import Quote
with session_scope() as session:
maxid = session.query(func.max(Quote.id)).scalar()
return _top_id(maxid)
class ClusterFilterMixin:
"""Mixin for :class:`~.db.Cluster`\ s adding the :meth:`filter` method used
in :func:`filter_clusters`."""
def filter(self):
"""Filter this :class:`~.db.Cluster` and its children
:class:`~.db.Quote`\ s to see if they're worth keeping.
First, iterate through all the children :class:`~.db.Quote`\ s of the
cluster, seeing if each one of them is worth keeping. A
:class:`~.db.Quote` is discarded if it has no urls, less than
:data:`~.settings.MT_FILTER_MIN_TOKENS`, spans longer than
:data:`~.settings.MT_FILTER_MAX_DAYS`, or is not in English. Any
:class:`~.db.Quote` that has none of those problems will be kept.
If after this filtering there are no :class:`~.db.Quote`\ s left, or
the :class:`~.db.Cluster` made of the remaining :class:`~.db.Quote`\ s
still spans longer than :data:`~.settings.MT_FILTER_MAX_DAYS`, the
cluster and all its quotes will be discarded and `None` is returned.
If not, a new :class:`~.db.Cluster` is created with `cluster.filtered =
True` and `cluster.id = original_cluster.id +`
:func:`filter_cluster_offset`. That new cluster points to copies of all
the kept :class:`~.db.Quote`\ s, with `quote.filtered = True` and
`quote.id = original_quote.id +` :func:`filter_quote_offset`. All those
models (new cluster and new quotes) should later be saved to the
database (the method does not do it for you), e.g. by running this
method inside a :func:`~.utils.session_scope`.
Returns
-------
cluster : :class:`~.db.Cluster` or None
The filtered cluster pointing to filtered quotes, or `None` if it
is to be discarded.
Raises
------
AlreadyFiltered
If this cluster is already filtered (i.e.
:attr:`~.db.Cluster.filtered` is `True`).
"""
if self.filtered:
raise AlreadyFiltered('Cluster is already filtered')
min_tokens = settings.MT_FILTER_MIN_TOKENS
max_span = timedelta(days=settings.MT_FILTER_MAX_DAYS)
fcluster = self.clone(id=filter_cluster_offset() + self.id,
filtered=True)
# Examine each quote for min_tokens, max_days, and language.
for quote in self.quotes:
if quote.frequency == 0:
logger.debug('Dropping quote #%s (cluster #%s): '
'no urls', quote.sid, self.sid)
continue
if len(quote.tokens) < min_tokens:
logger.debug('Dropping quote #%s (cluster #%s): '
'not enough tokens', quote.sid, self.sid)
continue
if quote.span > max_span:
logger.debug('Dropping quote #%s (cluster #%s): '
'span too big', quote.sid, self.sid)
continue
if langdetect(quote.string) != 'en':
logger.debug('Dropping quote #%s (cluster #%s): '
'not English', quote.sid, self.sid)
continue
logger.debug('Keeping quote #%s (cluster #%s)',
quote.sid, self.sid)
fquote = quote.clone(id=filter_quote_offset() + quote.id,
cluster_id=fcluster.id, filtered=True)
fcluster.quotes.append(fquote)
# If no quotes where kept, drop the whole cluster.
if fcluster.size == 0:
logger.debug('Dropping cluster #%s: no quotes left', self.sid)
return
# Finally, if the new cluster spans too many days, discard it.
if fcluster.span > max_span:
logger.debug('Dropping cluster #%s: span too big', self.sid)
return
logger.debug('Keeping cluster #%s after filtering', self.sid)
return fcluster
| gpl-3.0 | 3,308,807,656,683,033,000 | 34.932806 | 79 | 0.611484 | false |
sukuba/js-py-document-search | make_index.py | 1 | 5005 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# written for python 3 but also run on python 2
from __future__ import absolute_import, division, print_function, unicode_literals
"""
make index json file from text file tree.
Expects Python 3
https://github.com/sukuba/js-py-document-search
"""
import argparse
import os
import shutil
import datetime
import jsngram.jsngram
import jsngram.dir2
def remove_entries(dest):
"""
remove files and subdirectories at dest
"""
for entry in os.listdir(dest):
fullpath = os.path.join(dest, entry)
if os.path.isfile(fullpath):
os.remove(fullpath)
else:
shutil.rmtree(fullpath)
def make_index_by_files_inc(n, shorter, src, dest, flat, ignore, files_at_once, verbose_print):
"""
text files in src directory will be indexed.
"""
ix = jsngram.jsngram.JsNgram(n, shorter, src, dest, flat, ignore)
entries = jsngram.dir2.list_files(src)
n = len(entries)
for files in (entries[i:i+files_at_once] for i in range(0, n, files_at_once)):
ix.add_files_to_json(files, verbose_print)
print('%d indexes in %d files' % (len(ix.db), len(files)))
for f in files:
print(' ' + f)
print('%d files processed.' % len(entries))
return ix
def make_index(args):
"""
make index json file from text file tree
expected args; src, dest, size, noshorter, flat, once, ignore, verbose
"""
start_time = datetime.datetime.now()
print('Start: ', start_time)
print('Removing current index files ...')
remove_entries(args.dest)
print('Building index files ...')
ix = make_index_by_files_inc(args.size, not args.noshorter, args.src, args.dest,
args.flat, args.ignore, args.once, args.verbose)
print('Adjusting index files ...')
entries = jsngram.dir2.list_files(args.dest)
for entry in entries:
fullpath = os.path.join(args.dest, entry)
jsngram.json2.json_end(fullpath)
print('%d indexes' % len(entries))
print('Done.')
end_time = datetime.datetime.now()
span = end_time - start_time
sspan = '%d seconds' % span.seconds if span.seconds < 3600 else '%d hours' % (span.days * 24)
print('End: ', end_time, ' / runtime: ', sspan)
def main():
r"""
正規化済みのテキスト群からインデックスファイルを作る。
make_index.py E:\scratch txt idx
第1引数: 基準ディレクトリ(フルパス)
第2引数: 変換元テキストディレクトリ(基準からの相対パス)
第3引数: インデックス出力先ディレクトリ(基準からの相対パス)
--size: N-gramの文字長(デフォルト 2)
--noshorter: 文字長より短いインデックスは作成しない(デフォルト False)
--flat: ディレクトリ型でなく、ファイル型のインデックスを作成する(デフォルト False)
--once: 一度にインデックスを作成するファイル数(デフォルト 100)
--ignore: 単語区切りとして、インデックスから除外する文字パターン(正規表現; デフォルト [\s,.,.、。]+)
--verbose: 冗長な情報を出力する
入力は、単一ディレクトリ配下にtree構造で配置された、正規化済みの utf-8 text ファイル群。
出力は、N-gramによりtree構造に作成したインデックスファイル群。
"""
parser = argparse.ArgumentParser(description='正規化済みのテキスト群からインデックスファイルを作る')
parser.add_argument('base', help='基準ディレクトリ(フルパス)')
parser.add_argument('src', help='変換元テキストディレクトリ(基準からの相対パス)')
parser.add_argument('dest', help='インデックス出力先ディレクトリ(基準からの相対パス)')
parser.add_argument('-s', '--size', type=int, default=2, help='N-gramの文字長')
parser.add_argument('-n', '--noshorter', action='store_true', help='文字長より短いインデックスは作成しない')
parser.add_argument('-f', '--flat', action='store_true', help='ディレクトリ型でなく、ファイル型のインデックスを作成する')
parser.add_argument('-o', '--once', type=int, default=100, help='一度にインデックスを作成するファイル数')
parser.add_argument('-i', '--ignore', type=str, default=r'[\s,.,.、。]+', help='単語区切りとして、インデックスから除外する文字パターン')
parser.add_argument('-v', '--verbose', action='store_true', help='冗長な情報を出力する')
args = parser.parse_args()
args.src = os.path.join(args.base, args.src)
args.dest = os.path.join(args.base, args.dest)
if args.verbose:
print(args)
make_index(args)
if __name__ == '__main__':
main()
| mit | -1,257,073,300,274,279,700 | 33.008547 | 111 | 0.641116 | false |
hendiko/git-ring | modules/hooks.py | 1 | 3918 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# Created by Xavier Yin on 2015/3/1
from mail import send_mail
from tpls import TEMPLATES
import abc
import inspect
import re
import shlex
import sys
class Hook(object):
def __init__(self, arg, config):
self.raw_arg = str(arg)
self.args = shlex.split(self.raw_arg)
self.config = config
def get_template(self):
name = "Template%s" % self.__class__.__name__.lstrip("Hook")
klass = TEMPLATES.get(name, None)
if klass is not None:
return klass(self.config)
@abc.abstractmethod
def go(self):
pass
@abc.abstractmethod
def prepare(self):
pass
def get_title_prefix(self):
prefix = "[%(commit)s][%(ref)s][%(sha1)s]" % {
"ref": self.config.commit.ref,
"sha1": self.config.commit.sha1,
"commit": self.config.commit.new[:7]
}
return prefix
class HookTo(Hook):
def go(self):
tpl = self.get_template()
html = tpl.render() if tpl is not None else self.config.mail.msg
subject = "[通知]%(prefix)s %(subject)s" % {
"prefix": self.get_title_prefix(), "subject": self.config.mail.subject.encode('utf8')}
send_mail(self.config, recipients=self.config.mail.to, msg=html, subject=subject)
def prepare(self):
self.config.mail.to += self.args
class HookReview(Hook):
def go(self):
tpl = self.get_template()
html = tpl.render() if tpl is not None else self.config.mail.msg
subject = "[审阅]%(prefix)s %(subject)s" % {
"prefix": self.get_title_prefix(), "subject": self.config.mail.subject.encode('utf8')}
send_mail(self.config, recipients=self.args, to=self.args, msg=html, subject=subject)
def prepare(self):
pass
class HookCc(Hook):
def go(self):
tpl = self.get_template()
html = tpl.render() if tpl is not None else self.config.mail.msg
subject = "[抄送]%(prefix)s %(subject)s" % {
"prefix": self.get_title_prefix(), "subject": self.config.mail.subject.encode('utf8')}
send_mail(self.config, recipients=self.args, to=self.args, msg=html, subject=subject)
def prepare(self):
pass
class Watch(Hook):
def __init__(self, config):
self.config = config
def get_template(self):
name = "Template%s" % self.__class__.__name__.lstrip("Watch")
klass = TEMPLATES.get(name, None)
if klass is not None:
return klass(self.config)
def go(self):
pass
def prepare(self):
pass
class WatchSc(Watch):
def go(self):
tpl = self.get_template()
html = tpl.render()
subject = "[密送]%(prefix)s %(subject)s" % {
"prefix": self.get_title_prefix(), "subject": self.config.mail.subject.encode('utf8')}
send_mail(self.config, recipients=self.config.mail.sc, to=self.config.mail.sc,
msg=html, subject=subject)
class Parser(object):
RE_HOOK_FIELD = r"\[\[(.*)\]\]"
def __init__(self, msg, config):
self.msg = str(msg)
self.config = config
self.fields = self._get_all_fields()
self.hooks = self._get_all_hooks()
def _get_all_fields(self):
return re.findall(self.RE_HOOK_FIELD, self.msg)
def _get_all_hooks(self):
hooks = [x.split(":", 1) for x in self.fields]
_hooks = {}
for k, v in hooks:
_hooks[k.strip()] = v.strip()
return _hooks
def _get_all_hook_objects(prefix='Hook'):
klasses = inspect.getmembers(sys.modules[__name__], inspect.isclass)
hooks = {}
for k, v in klasses:
if k.startswith(prefix) and len(k) > len(prefix):
hooks[k] = v
return hooks
HOOKS = _get_all_hook_objects()
WATCHES = _get_all_hook_objects('Watch')
if __name__ == "__main__":
pass | mit | 6,190,497,298,015,324,000 | 26.293706 | 98 | 0.581497 | false |
VanHulleOne/DogBone | gcode.py | 1 | 2105 | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 06 10:30:44 2015
Used creating all of the lines of Gcode.
@author: lvanhulle
"""
import parameters as pr
def feedMove(endPoint, ommitZ, extrudeTo, printSpeed):
if ommitZ:
tempString = ('X{:.3f} Y{:.3f} E{:.3f}'.format(endPoint.x,
endPoint.y, extrudeTo))
else:
tempString = ('X{:.3f} Y{:.3f} Z{:.3f} E{:.3f}\n'.format(endPoint.x,
endPoint.y, endPoint.z, extrudeTo))
return 'G01 ' + tempString + ' F{:.0f}\n'.format(printSpeed)
def rapidMove(endPoint, ommitZ):
if ommitZ:
return ('G00 X{:.3f} Y{:.3f} F{:.0f}\n'.format(endPoint.x, endPoint.y,
pr.RAPID))
else:
return ('G00 X{:.3f} Y{:.3f} Z{:.3f} F{:.3f}\n'.format(endPoint.x, endPoint.y, endPoint.z,
pr.RAPID))
def retractLayer(currentE, currentPoint):
tempString = 'G1 E{:.3f} F{:.0f}\n'.format(currentE-pr.TRAVERSE_RETRACT, pr.MAX_EXTRUDE_SPEED)
tempString += 'G00 Z{:.3f} F{:.0f}\n'.format(currentPoint.z+pr.Z_CLEARANCE, pr.RAPID)
return tempString
def approachLayer(lastE, startPoint):
tempString = 'G1 Z{:.3f} F{:.0f} E{:.3f}\n'.format(startPoint.z+pr.Z_CLEARANCE/2.0,
pr.RAPID, lastE-pr.TRAVERSE_RETRACT*0.75)
tempString += 'G1 Z{:.3f} F{:.0f} E{:.3f}\n'.format(startPoint.z,
pr.APPROACH_FR, lastE)
return tempString
def firstApproach(lastE, startPoint):
return 'G1 Z{:.3f} F{:.0f} E{:.3f}\n'.format(startPoint.z, pr.APPROACH_FR, lastE)
def newPart():
return 'G92 E0\n'
def startGcode():
with open(pr.startEndSubDirectory + '\\' + pr.start_Gcode_FileName) as startFile:
lines = startFile.readlines()
tempString = ''
for line in lines:
tempString += str(line)
return tempString
def endGcode():
with open(pr.startEndSubDirectory + '\\' + pr.end_Gcode_FileName) as endFile:
lines = endFile.readlines()
tempString = ''
for line in lines:
tempString += str(line)
return tempString | mit | -3,172,441,500,738,403,300 | 31.90625 | 98 | 0.586698 | false |
andyv/sse | ir_nodes.py | 1 | 26015 |
# (C) 2014-2015 Andrew Vaught
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Intermediate representation nodes
import sys
from kw import constant, type_node
from kw import type_void, type_float4, type_float8, type_int8, type_int4
from kw import type_int2, type_int1, type_uint8, type_uint4, type_uint2
from kw import type_uint1, type_float8_2, type_float4_4, type_int8_2
from kw import type_int4_4, type_int2_8, type_int1_16
class parse_error(Exception):
pass
# ir_nodes are expressions, labels and jumps linked together in a
# doubly linked list. The links are through the 'next' and 'prev'
# members.
class ir_node:
# remove()-- Remove this node from the linked list.
def remove(self):
if self.next is not None:
self.next.prev = self.prev
pass
if self.prev is not None:
self.prev.next = self.next
pass
return
# insert_next()-- Insert a node after this one.
def insert_next(self, node):
if self.next is not None:
self.next.prev = node
pass
node.next = self.next
node.prev = self
self.next = node
return
# insert_prev()-- Insert a node previous to this one.
def insert_prev(self, node):
if self.prev is not None:
self.prev.next = node
pass
node.next = self
node.prev = self.prev
self.prev = node
return
def successor(self):
return [] if self.next is None else [ self.next ]
def predecessor(self):
if self.prev is None or \
(isinstance(self.prev, jump) and self.prev.cond is None):
return []
return [ self.prev ]
pass
# jumps represent control transfers to other nodes. The target is the
# 'label' member. If 'cond' is non-None, then it must be an
# expression. The branch is taken if the expression is true.
class jump(ir_node):
def __init__(self, label, cond=None):
self.label = label
self.cond = cond
label.jumps.append(self)
return
def show(self):
print 'jump %s' % self.label.name,
if self.cond is not None:
print ' cond=',
self.cond.show()
pass
return
def successor(self):
if self.cond is None:
return [ self.label ]
return [ self.label ] + ir_node.successor(self)
    def replace_vars(self, repl):
        if self.cond is None:
            return

        if self.cond in repl:
            self.cond = repl[self.cond]
        else:
            self.cond.replace_vars(repl)
            pass

        return
pass
# Labels are jump targets in the code. They have a string 'name'.
# The 'jumps' member is a list of jump nodes that jump to this label,
# conditionally or unconditionally.
class label(ir_node):
def __init__(self, name):
self.name = name
self.defined = False
self.jumps = []
self.phi_list = []
return
def show(self):
print '%s (%d):' % (self.name, len(self.jumps)),
for p in self.phi_list:
p.show()
sys.stdout.write(' ')
pass
return
def predecessor(self):
return ir_node.predecessor(self) + self.jumps
pass
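
# get_temp_label()-- Return a fresh, uniquely named temporary label.  The
# mutable default argument is used deliberately as a counter that
# persists across calls.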
def get_temp_label(index=[0]):
index[0] += 1
return label('L.%d' % index[0])
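
# The sketch below is illustrative only and is not part of the original
# module: it shows how ir_nodes are chained with insert_next() and walked
# with successor()/predecessor().  The surrounding compiler presumably
# initializes the 'next'/'prev' links when it builds the statement list;
# the sketch has to do that by hand.

def _sketch_linked_nodes():
    head = get_temp_label()
    tail = get_temp_label()
    branch = jump(tail)                # unconditional jump to 'tail'

    for node in ( head, branch, tail ):
        node.next = node.prev = None   # hand-rolled link setup (assumption)
        pass

    head.insert_next(branch)           # head -> branch
    branch.insert_next(tail)           # head -> branch -> tail

    # branch.successor() is [ tail ].  tail.predecessor() is [ branch ],
    # found through tail.jumps; an unconditional jump is deliberately not
    # counted as a fall-through ('prev') predecessor.
    return head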
class variable:
def __init__(self, name, var_type, initial=None,
q_static=None, q_extern=None):
assert(isinstance(var_type, type_node))
self.name = name
self.type = var_type
self.q_static = q_static
self.q_extern = q_extern
self.initial = initial
if var_type.basic_type is type_void:
raise parse_error, 'Variable cannot have a void type'
return
def show(self, show_initial=False):
print self.name,
if show_initial and self.initial is not None:
print '= ',
self.initial.show()
pass
return
def simplify(self):
return self
def replace_vars(self, repl):
return
def used_vars(self, result):
result[self] = True
return
# next_variant()-- Return the next variant on the base variable. The
# variants don't have the c or stack members. The variant is
# automatically put on the stack.
def next_variant(self):
self.c += 1
v = variable('%s.%d' % ( self.name, self.c ), self.type)
self.stack.append(v)
return v
pass
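
# Illustrative sketch only (not part of the original module): using
# next_variant() assumes an SSA-renaming pass has already attached the
# 'c' counter and the 'stack' list to the base variable.  That setup code
# is not in this file, so the sketch fakes it by hand; the type_node
# arguments mirror how types are built elsewhere in this module.

def _sketch_variants():
    base = variable('x', type_node(type_int4, 0))
    base.c = 0
    base.stack = [ base ]

    v1 = base.next_variant()    # named 'x.1', pushed onto base.stack
    v2 = base.next_variant()    # named 'x.2'
    return v1, v2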
class phi:
def __init__(self, var):
self.var = var
self.lhs = None
self.args = []
return
def show(self):
arglist = ', '.join([ a.var.name for a in self.args ])
print '%s = phi(%s)' % ( self.lhs.name, arglist ),
return
pass
# Phi arguments consist of a variable instance and the statement node
# from which control, carrying that variable's value, arrives.
class phi_arg:
def __init__(self, var, node):
self.var = var
self.node = node
return
pass
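
# For example (illustrative only): if variants x.1 and x.2 of a variable
# reach a join label from two different jumps, the label's phi_list holds
# a phi whose args are phi_arg(x.1, <jump node 1>) and
# phi_arg(x.2, <jump node 2>) and whose lhs is a fresh variant such as
# x.3.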
# Expressions are assignment statements and other binary/unary
# expressions.
class expr(ir_node):
def jump_opcode(self):
return 'jnz'
pass
# Sub-classes of expressions
class expr_assign(expr):
def __init__(self, *args):
self.var, self.value = args
return
def show(self):
self.var.show()
sys.stdout.write(' = ')
self.value.show()
return
def simplify(self):
self.var = self.var.simplify()
self.value = self.value.simplify()
return self
def used_vars(self, result):
result[self.var] = True
self.value.used_vars(result)
return
def replace_vars(self, repl):
if self.value in repl:
self.value = repl[self.value]
else:
self.value.replace_vars(repl)
pass
return
pass
# swaps always have variables for arguments; they are produced
# deep in the phi-merging code.
class swap(expr):
def __init__(self, a, b):
self.a = a
self.b = b
return
def show(self):
sys.stdout.write('swap ')
self.a.show()
sys.stdout.write(', ')
self.b.show()
return
pass
class expr_ternary(expr):
def __init__(self, *args):
self.predicate, self.a, self.b = args
return
def show(self):
self.predicate.show()
sys.stdout.write(' ? ')
self.a.show()
sys.stdout.write(' : ')
self.b.show()
return
    def simplify(self):
        self.predicate = self.predicate.simplify()
        self.a = self.a.simplify()
        self.b = self.b.simplify()

        if not isinstance(self.predicate, constant):
            return self

        return self.a if self.predicate.value else self.b
    def used_vars(self, result):
        self.predicate.used_vars(result)
        self.a.used_vars(result)
        self.b.used_vars(result)
        return
def replace_vars(self, repl):
if self.predicate in repl:
self.predicate = repl[self.predicate]
else:
self.predicate.replace_vars(repl)
pass
if self.a in repl:
self.a = repl[self.a]
else:
self.a.replace_vars(repl)
pass
if self.b in repl:
self.b = repl[self.b]
else:
self.b.replace_vars(repl)
pass
return
pass
#########################
binary_type_map = {}
bitwise_type_map = {}
logical_type_map = {}
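
# binary_type_map maps a pair of operand basic types to the result type
# of the ordinary arithmetic operators.  bitwise_type_map does the same
# for the integer-only shift/bitwise operators, and logical_type_map
# covers comparisons and logical operators, which always yield an int4.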
def init_btm():
btm = binary_type_map
for t in [ type_float8, type_float4, type_int8, type_int4, type_int2,
type_int1, type_uint8, type_uint4, type_uint2, type_uint1 ]:
btm[type_float8_2, t] = type_float8_2
btm[type_float4_4, t] = type_float4_4
btm[type_float8, t] = type_float8
pass
tlist = [ type_float4, type_uint8, type_int8, type_uint4, type_int4,
type_uint2, type_int2, type_uint1, type_int1 ]
while len(tlist) > 0:
t1 = tlist.pop(0)
btm[t1, t1] = t1
for t2 in tlist:
btm[t1, t2] = t1
pass
pass
for t1, t2 in btm.keys():
btm[t2, t1] = btm[t1, t2]
pass
btm = bitwise_type_map
tlist = [ type_uint8, type_int8, type_uint4, type_int4,
type_uint2, type_int2, type_uint1, type_int1 ]
while len(tlist) > 0:
t1 = tlist.pop(0)
btm[t1, t1] = t1
logical_type_map[t1, t1] = type_int4
for t2 in tlist:
btm[t1, t2] = t1
logical_type_map[t1, t2] = type_int4
pass
pass
for t1, t2 in btm.keys():
btm[t2, t1] = btm[t1, t2]
logical_type_map[t2, t1] = type_int4
pass
return
init_btm()
class expr_binary(expr):
type_map = { '+': binary_type_map, '-': binary_type_map,
'*': binary_type_map, '/': binary_type_map,
'%': binary_type_map,
'>>': bitwise_type_map, '<<': bitwise_type_map,
'&': bitwise_type_map, '^': bitwise_type_map,
'|': bitwise_type_map,
'&&': logical_type_map, '||': logical_type_map,
'==': logical_type_map, '!=': logical_type_map,
'<=': logical_type_map, '<': logical_type_map,
'>=': logical_type_map, '>': logical_type_map,
}
def __init__(self, *args):
self.a, self.b = args
# Keep constants typeless.
if isinstance(self.a, constant) and isinstance(self.b, constant):
return
if isinstance(self.a, constant):
self.type = self.b.type
return
if isinstance(self.b, constant):
self.type = self.a.type
return
ta = self.a.type.basic_type
tb = self.b.type.basic_type
try:
self.type = type_node(self.type_map[self.op][ta, tb], 0)
except KeyError:
msg = 'Operator "%s" cannot use arguments %s/%s' % \
(self.op, ta.name, tb.name)
raise parse_error, msg
if self.type.basic_type != ta:
self.a = expr_type_conv(self.type, self.a)
pass
if self.type.basic_type != tb:
self.b = expr_type_conv(self.type, self.b)
pass
return
def show(self):
self.a.show()
sys.stdout.write(self.op)
self.b.show()
return
def used_vars(self, result):
self.a.used_vars(result)
self.b.used_vars(result)
return
def replace_vars(self, repl):
if self.a in repl:
self.a = repl[self.a]
else:
self.a.replace_vars(repl)
pass
if self.b in repl:
self.b = repl[self.b]
else:
self.b.replace_vars(repl)
pass
return
class expr_mult(expr_binary):
op = '*'
def simplify(self):
self.a = self.a.simplify()
self.b = self.b.simplify()
if isinstance(self.a, constant):
if self.a.value == 0:
return constant(0, self.a.type)
if self.a.value == 1:
return self.b
if self.a.value == -1:
return expr_uminus(self.b).simplify()
pass
if isinstance(self.b, constant):
if self.b.value == 0:
return constant(0, self.b.type)
if self.b.value == 1:
return self.a
if self.b.value == -1:
return expr_uminus(self.a).simplify()
pass
if isinstance(self.a, constant) and isinstance(self.b, constant):
            return constant(self.a.value * self.b.value, self.a.type)
return self
pass
class expr_quotient(expr_binary):
op = '/'
def simplify(self):
self.a = self.a.simplify()
self.b = self.b.simplify()
if isinstance(self.a, constant):
if self.a.value == 0:
return constant(0, self.a.type)
pass
if isinstance(self.b, constant):
if self.b.value == 1:
return self.a
if self.b.value == -1:
return expr_uminus(self.a).simplify()
pass
if isinstance(self.a, constant) and isinstance(self.b, constant):
return constant(self.a.value / self.b.value, self.a.type)
return self
pass
class expr_modulus(expr_binary):
op = '%'
def simplify(self):
self.a = self.a.simplify()
self.b = self.b.simplify()
if isinstance(self.a, constant) and isinstance(self.b, constant):
return constant(self.a.value % self.b.value, self.a.type)
return self
pass
class expr_plus(expr_binary):
op = '+'
arith_op = 'add'
def simplify(self):
self.a = self.a.simplify()
self.b = self.b.simplify()
if isinstance(self.a, constant) and self.a.value == 0:
return self.b
if isinstance(self.b, constant) and self.b.value == 0:
return self.a
if isinstance(self.a, constant) and isinstance(self.b, constant):
return constant(self.a.value + self.b.value, self.a.type)
return self
pass
class expr_minus(expr_binary):
op = '-'
arith_op = 'sub'
def simplify(self):
self.a = self.a.simplify()
self.b = self.b.simplify()
if isinstance(self.a, constant) and self.a.value == 0:
return expr_uminus(self.b).simplify()
if isinstance(self.b, constant) and self.b.value == 0:
return self.a
if isinstance(self.a, constant) and isinstance(self.b, constant):
return constant(self.a.value - self.b.value, self.a.type)
return self
pass
class expr_lshift(expr_binary):
op = '<<'
def simplify(self):
self.a = self.a.simplify()
self.b = self.b.simplify()
if isinstance(self.a, constant) and isinstance(self.b, constant):
return constant(self.a.value << self.b.value, self.a.type)
return self
pass
class expr_rshift(expr_binary):
op = '>>'
def simplify(self):
self.a = self.a.simplify()
self.b = self.b.simplify()
if isinstance(self.a, constant) and isinstance(self.b, constant):
return constant(self.a.value >> self.b.value, self.a.type)
return self
pass
class expr_bitwise_and(expr_binary):
op = '&'
arith_op = 'and'
def simplify(self):
self.a = self.a.simplify()
self.b = self.b.simplify()
if isinstance(self.a, constant) and isinstance(self.b, constant):
return constant(self.a.value & self.b.value, self.a.type)
return self
pass
class expr_bitwise_xor(expr_binary):
op = '^'
arith_op = 'xor'
def simplify(self):
self.a = self.a.simplify()
self.b = self.b.simplify()
if isinstance(self.a, constant) and isinstance(self.b, constant):
return constant(self.a.value ^ self.b.value, self.a.type)
return self
pass
class expr_bitwise_or(expr_binary):
op = '|'
arith_op = 'or'
def simplify(self):
self.a = self.a.simplify()
self.b = self.b.simplify()
if isinstance(self.a, constant) and isinstance(self.b, constant):
return constant(self.a.value | self.b.value, self.a.type)
return self
pass
class expr_logical_and(expr_binary):
op = '&&'
def simplify(self):
self.a = self.a.simplify()
self.b = self.b.simplify()
if isinstance(self.a, constant) and self.a.value != 0:
return self.b
return self
pass
class expr_logical_or(expr_binary):
op = '||'
def simplify(self):
self.a = self.a.simplify()
self.b = self.b.simplify()
if isinstance(self.a, constant):
return self.b if self.a.value == 0 else constant(1, self.a.type)
return self
pass
class expr_compare(expr_binary):
def simplify(self):
self.a = self.a.simplify()
self.b = self.b.simplify()
if isinstance(self.a, constant) and isinstance(self.b, constant):
            a = self.a.value
            b = self.b.value
            if self.op == '==':
                rc = 1 if a == b else 0
            elif self.op == '!=':
                rc = 1 if a != b else 0
            elif self.op == '<':
                rc = 1 if a < b else 0
            elif self.op == '<=':
                rc = 1 if a <= b else 0
            elif self.op == '>':
                rc = 1 if a > b else 0
            elif self.op == '>=':
                rc = 1 if a >= b else 0
else:
raise SystemExit, 'expr_compare.simplify(): Bad operator'
return constant(rc, type_node(type_int4, 0))
return self
pass
class expr_equal(expr_compare):
op = '=='
pass
class expr_not_equal(expr_compare):
op = '!='
pass
class expr_less(expr_compare):
op = '<'
pass
class expr_less_equal(expr_compare):
op = '<='
pass
class expr_greater(expr_compare):
op = '>'
pass
class expr_greater_equal(expr_compare):
op = '>='
pass
opposite_cond = {
expr_equal: expr_not_equal,
expr_not_equal: expr_equal,
expr_less: expr_greater_equal,
expr_less_equal: expr_greater,
expr_greater: expr_less_equal,
expr_greater_equal: expr_less,
}
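# opposite_cond maps each comparison class to its logical negation.  It is
# used by expr_logical_not.simplify() and invert_condition() below to fold
# '!(a < b)' into 'a >= b' instead of emitting an explicit NOT.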
################
class expr_unary(expr):
def __init__(self, arg):
self.arg = arg
self.type = arg.type
return
def show(self):
sys.stdout.write(self.op)
sys.stdout.write('(')
self.arg.show()
sys.stdout.write(')')
return
def used_vars(self, result):
self.arg.used_vars(result)
return
def replace_vars(self, repl):
if self.arg in repl:
self.arg = repl[self.arg]
else:
self.arg.replace_vars(repl)
pass
return
pass
class expr_uplus(expr_unary):
op = '+'
def simplify(self):
        return self.arg.simplify()
pass
class expr_uminus(expr_unary):
op = '-'
arith_op = 'neg'
def simplify(self):
self.arg = self.arg.simplify()
if isinstance(self.arg, expr_uminus):
return self.arg.arg
if not isinstance(self.arg, constant):
return self
self.arg.value *= -1
return self.arg
pass
class expr_load(expr_unary):
op = '*'
def simplify(self):
self.arg = self.arg.simplify()
return self
pass
class expr_logical_not(expr_unary):
op = '!'
def simplify(self):
self.arg = a = self.arg.simplify()
if isinstance(a, expr_compare):
return opposite_cond[a.__class__](a.a, a.b)
return self
pass
class expr_bitwise_not(expr_unary):
op = '~'
arith_op = 'not'
def simplify(self):
self.arg = a = self.arg.simplify()
if isinstance(a, constant):
a.value = ~a.value
return self.arg
return self
class expr_paren(expr_unary):
def show(self):
sys.stdout.write('(')
self.arg.show()
sys.stdout.write(')')
return
def simplify(self):
return self.arg.simplify()
pass
class expr_intrinsic(expr):
def __init__(self, *args):
self.name, self.arg = args
return
def show(self):
sys.stdout.write(self.name + '(')
self.arg.show()
sys.stdout.write(')')
return
def used_vars(self, result):
self.arg.used_vars(result)
return
def replace_vars(self, repl):
if self.arg in repl:
self.arg = repl[self.arg]
else:
self.arg.replace_vars(repl)
pass
return
pass
class expr_type_conv(expr_intrinsic):
def __init__(self, type_decl, arg):
self.type = type_decl
self.arg = arg
return
def show(self):
sys.stdout.write('type_' + self.type.basic_type.name + '(')
self.arg.show()
sys.stdout.write(')')
return
def simplify(self):
self.arg = self.arg.simplify()
if not isinstance(self.arg, constant):
return self
self.arg.type = self.type
return self.arg
pass
# invert_condition()-- Invert the condition, simplifying.
def invert_condition(e):
e = expr_logical_not(e)
return e.simplify()
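# Illustrative sketch (not part of the original source): inverting a
# comparison flips the operator via opposite_cond, e.g.
#   invert_condition(expr_less(a, b))   -> expr_greater_equal(a, b)
#   invert_condition(expr_equal(a, b))  -> expr_not_equal(a, b)
# where 'a' and 'b' stand for arbitrary non-constant sub-expressions.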
### Machine registers
class register:
def __str__(self):
return self.name
pass
# Constants like reg_a represent the rax register, the sub-registers
# are the ones actually used inside insn nodes. The 'name' member is
# the name that shows up on the assembler output.
class integer_subreg(register):
    mem = False
def __init__(self, name):
self.name = '%' + name
return
pass
class integer_register(register):
def __init__(self, n64, n32, n16, n8):
r8 = integer_subreg(n64)
r4 = integer_subreg(n32)
r2 = integer_subreg(n16)
r1 = integer_subreg(n8)
self.map = { type_int1: r1, type_uint1: r1,
type_int2: r2, type_uint2: r2,
type_int4: r4, type_uint4: r4,
type_int8: r8, type_uint8: r8, }
for v in [ r1, r2, r4, r8 ]:
v.parent = self
pass
return
def get_subreg(self, t):
return self.map[type_uint8] if t.level > 0 else self.map[t.basic_type]
pass
reg_a = integer_register('rax', 'eax', 'ax', 'al')
reg_b = integer_register('rbx', 'ebx', 'bx', 'bl')
reg_c = integer_register('rcx', 'ecx', 'cx', 'cl')
reg_d = integer_register('rdx', 'edx', 'dx', 'dl')
reg_src = integer_register('rsi', 'esi', 'si', 'sil')
reg_dst = integer_register('rdi', 'edi', 'di', 'dil')
reg_base = integer_register('rbp', 'ebp', 'bp', 'bpl')
reg_8 = integer_register('r8', 'r8d', 'r8l', 'r8b')
reg_9 = integer_register('r9', 'r9d', 'r9l', 'r9b')
reg_10 = integer_register('r10', 'r10d', 'r10l', 'r10b')
reg_11 = integer_register('r11', 'r11d', 'r11l', 'r11b')
reg_12 = integer_register('r12', 'r12d', 'r12l', 'r12b')
reg_13 = integer_register('r13', 'r13d', 'r13l', 'r13b')
reg_14 = integer_register('r14', 'r14d', 'r14l', 'r14b')
reg_15 = integer_register('r15', 'r15d', 'r15l', 'r15b')
integer_regs = [ reg_a, reg_b, reg_c, reg_d, reg_src, reg_dst, reg_base,
reg_8, reg_9, reg_10, reg_11, reg_12, reg_13, reg_14, reg_15 ]
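# Illustrative sketch (not part of the original source): selecting a register
# width by operand type, assuming type_node(basic_type, level) as used above.
#   reg_a.get_subreg(type_node(type_int4, 0))   # -> the '%eax' sub-register
#   reg_a.get_subreg(type_node(type_int1, 1))   # pointer level > 0 -> '%rax'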
class xmm_register(register):
mem = False
def __init__(self, name):
self.name = name
return
pass
xmm0 = xmm_register('xmm0')
xmm1 = xmm_register('xmm1')
xmm2 = xmm_register('xmm2')
xmm3 = xmm_register('xmm3')
xmm4 = xmm_register('xmm4')
xmm5 = xmm_register('xmm5')
xmm6 = xmm_register('xmm6')
xmm7 = xmm_register('xmm7')
xmm8 = xmm_register('xmm8')
xmm9 = xmm_register('xmm9')
xmm10 = xmm_register('xmm10')
xmm11 = xmm_register('xmm11')
xmm12 = xmm_register('xmm12')
xmm13 = xmm_register('xmm13')
xmm14 = xmm_register('xmm14')
xmm15 = xmm_register('xmm15')
xmm_regs = [ xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,
xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15 ]
# Memory registers are regular registers that happen to map to memory
# instead of a cpu register.
class memory(register):
mem = True
def __init__(self, n):
self.n = n
return
pass
def get_memory_register(count=[0]):
count[0] += 1
return memory(count[0])
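# Each call to get_memory_register() hands out a fresh, numbered memory slot;
# the mutable default argument acts as a per-process counter.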
# number_st()-- Utility for show_flowgraph that assigns statement
# numbers to ir_nodes.
def number_st(graph):
n = 0
st = graph
while st is not None:
st.n = n
n = n + 1
st = st.next
pass
return
# show_flowgraph()-- Show the flow graph. This subroutine is capable
# of showing nodes at several different stages of compilation.
def show_flowgraph(graph):
number_st(graph)
st = graph
while st is not None:
if hasattr(st, 'live'):
print 'Live:', ', '.join([ v.name for v in st.live ])
pass
if hasattr(st, 'dom'):
d = -1 if st.dom is None else st.dom.n
print '%3d (%3d) ' % (st.n, d),
pass
if hasattr(st, 'DF'):
df = ', '.join([ str(x.n) for x in st.DF ])
print '[ %-15s ]' % df,
pass
n = 2 if isinstance(st, label) else 8
sys.stdout.write(n*' ')
st.show()
print
st = st.next
pass
return
| bsd-2-clause | -4,133,121,555,676,565,500 | 21.121599 | 80 | 0.554642 | false |
luoyetx/mxnet | python/mxnet/optimizer.py | 1 | 58574 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=too-many-lines
"""Weight updating functions."""
import logging
import math
import pickle
import warnings
import numpy
from .base import py_str
from .ndarray import (NDArray, zeros, clip, sqrt, cast, maximum, abs as NDabs,
                      array, multiply)
from .ndarray import (sgd_update, sgd_mom_update, adam_update, rmsprop_update, rmspropalex_update,
mp_sgd_update, mp_sgd_mom_update, square, ftrl_update, ftml_update,
signsgd_update, signum_update)
from .ndarray import sparse
from .random import normal
class Optimizer(object):
"""The base class inherited by all optimizers.
Parameters
----------
rescale_grad : float, optional
Multiply the gradient with `rescale_grad` before updating. Often
choose to be ``1.0/batch_size``.
param_idx2name : dict from int to string, optional
A dictionary that maps int index to string name.
clip_gradient : float, optional
Clip the gradient by projecting onto the box ``[-clip_gradient, clip_gradient]``.
learning_rate : float, optional
The initial learning rate.
lr_scheduler : LRScheduler, optional
The learning rate scheduler.
wd : float, optional
The weight decay (or L2 regularization) coefficient. Modifies objective
by adding a penalty for having large weights.
sym: Symbol, optional
The Symbol this optimizer is applying to.
begin_num_update : int, optional
The initial number of updates.
multi_precision : bool, optional
Flag to control the internal precision of the optimizer.
``False`` results in using the same precision as the weights (default),
``True`` makes internal 32-bit copy of the weights and applies gradients
in 32-bit precision even if actual weights used in the model have lower precision.
Turning this on can improve convergence and accuracy when training with float16.
Properties
----------
learning_rate : float
The current learning rate of the optimizer. Given an Optimizer object
optimizer, its learning rate can be accessed as optimizer.learning_rate.
"""
def __init__(self, rescale_grad=1., param_idx2name=None, wd=0.,
clip_gradient=None, learning_rate=0.01,
lr_scheduler=None, sym=None, begin_num_update=0,
multi_precision=False, param_dict=None):
self.rescale_grad = rescale_grad
self.lr = learning_rate
self.lr_scheduler = lr_scheduler
if lr_scheduler is not None:
self.lr_scheduler.base_lr = learning_rate
self.wd = wd
self.lr_mult = {}
self.wd_mult = {}
self.begin_num_update = begin_num_update
self.num_update = begin_num_update
self._index_update_count = {}
self.clip_gradient = clip_gradient
self.multi_precision = multi_precision
if param_idx2name is None:
param_idx2name = {}
assert isinstance(param_idx2name, dict), \
'param_idx2name should be a dict of param indexes to names.'
self.idx2name = param_idx2name.copy()
self.sym_info = (sym.attr_dict(), sym.list_arguments()) if sym is not None else ()
self.param_dict = param_dict if param_dict else {}
self.set_lr_mult({})
self.set_wd_mult({})
opt_registry = {}
@staticmethod
def register(klass):
"""Registers a new optimizer.
Once an optimizer is registered, we can create an instance of this
optimizer with `create_optimizer` later.
Examples
--------
>>> @mx.optimizer.Optimizer.register
... class MyOptimizer(mx.optimizer.Optimizer):
... pass
>>> optim = mx.optimizer.Optimizer.create_optimizer('MyOptimizer')
>>> print(type(optim))
<class '__main__.MyOptimizer'>
"""
assert(isinstance(klass, type))
name = klass.__name__.lower()
if name in Optimizer.opt_registry:
warnings.warn('WARNING: New optimizer %s.%s is overriding '
'existing optimizer %s.%s' %
(klass.__module__, klass.__name__,
Optimizer.opt_registry[name].__module__,
Optimizer.opt_registry[name].__name__))
Optimizer.opt_registry[name] = klass
return klass
@staticmethod
def create_optimizer(name, **kwargs):
"""Instantiates an optimizer with a given name and kwargs.
.. note:: We can use the alias `create` for ``Optimizer.create_optimizer``.
Parameters
----------
name: str
Name of the optimizer. Should be the name
of a subclass of Optimizer. Case insensitive.
kwargs: dict
Parameters for the optimizer.
Returns
-------
Optimizer
An instantiated optimizer.
Examples
--------
>>> sgd = mx.optimizer.Optimizer.create_optimizer('sgd')
>>> type(sgd)
<class 'mxnet.optimizer.SGD'>
>>> adam = mx.optimizer.create('adam', learning_rate=.1)
>>> type(adam)
<class 'mxnet.optimizer.Adam'>
"""
if name.lower() in Optimizer.opt_registry:
return Optimizer.opt_registry[name.lower()](**kwargs)
else:
raise ValueError('Cannot find optimizer %s' % name)
@property
def learning_rate(self):
if self.lr_scheduler is not None:
return self.lr_scheduler(self.num_update)
else:
return self.lr
def create_state(self, index, weight):
"""Creates auxiliary state for a given weight.
Some optimizers require additional states, e.g. as momentum, in addition
to gradients in order to update weights. This function creates state
for a given weight which will be used in `update`. This function is
called only once for each weight.
Parameters
----------
index : int
An unique index to identify the weight.
weight : NDArray
The weight.
Returns
-------
state : any obj
The state associated with the weight.
"""
def create_state_multi_precision(self, index, weight):
"""Creates auxiliary state for a given weight, including FP32 high
precision copy if original weight is FP16.
This method is provided to perform automatic mixed precision training
for optimizers that do not support it themselves.
Parameters
----------
index : int
An unique index to identify the weight.
weight : NDArray
The weight.
Returns
-------
state : any obj
The state associated with the weight.
"""
weight_master_copy = None
if self.multi_precision and weight.dtype == numpy.float16:
weight_master_copy = weight.astype(numpy.float32)
return (weight_master_copy,) + (self.create_state(index, weight_master_copy),)
if weight.dtype == numpy.float16 and not self.multi_precision:
warnings.warn("Accumulating with float16 in optimizer can lead to "
"poor accuracy or slow convergence. "
"Consider using multi_precision=True option of the "
"optimizer")
return self.create_state(index, weight)
def update(self, index, weight, grad, state):
"""Updates the given parameter using the corresponding gradient and state.
Parameters
----------
index : int
The unique index of the parameter into the individual learning
rates and weight decays. Learning rates and weight decay
may be set via `set_lr_mult()` and `set_wd_mult()`, respectively.
weight : NDArray
The parameter to be updated.
grad : NDArray
The gradient of the objective with respect to this parameter.
state : any obj
The state returned by `create_state()`.
"""
raise NotImplementedError()
def update_multi_precision(self, index, weight, grad, state):
"""Updates the given parameter using the corresponding gradient and state.
Mixed precision version.
Parameters
----------
index : int
The unique index of the parameter into the individual learning
rates and weight decays. Learning rates and weight decay
may be set via `set_lr_mult()` and `set_wd_mult()`, respectively.
weight : NDArray
The parameter to be updated.
grad : NDArray
The gradient of the objective with respect to this parameter.
state : any obj
The state returned by `create_state()`.
"""
if self.multi_precision and weight.dtype == numpy.float16:
# Wrapper for mixed precision
weight_master_copy = state[0]
original_state = state[1]
grad32 = grad.astype(numpy.float32)
self.update(index, weight_master_copy, grad32, original_state)
cast(weight_master_copy, dtype=weight.dtype, out=weight)
else:
self.update(index, weight, grad, state)
def set_learning_rate(self, lr):
"""Sets a new learning rate of the optimizer.
Parameters
----------
lr : float
The new learning rate of the optimizer.
"""
if self.lr_scheduler is not None:
raise UserWarning("LRScheduler of the optimizer has already been "
"defined. Note that set_learning_rate can mutate "
"the value of the learning rate of the optimizer "
"only when the LRScheduler of the optimizer is "
"undefined.")
else:
self.lr = lr
def set_lr_scale(self, args_lrscale): # pylint: disable=unused-argument
"""[DEPRECATED] Sets lr scale. Use set_lr_mult instead."""
raise DeprecationWarning
def set_lr_mult(self, args_lr_mult):
"""Sets an individual learning rate multiplier for each parameter.
If you specify a learning rate multiplier for a parameter, then
the learning rate for the parameter will be set as the product of
the global learning rate `self.lr` and its multiplier.
.. note:: The default learning rate multiplier of a `Variable`
can be set with `lr_mult` argument in the constructor.
Parameters
----------
args_lr_mult : dict of str/int to float
For each of its key-value entries, the learning rate multipler for the
parameter specified in the key will be set as the given value.
You can specify the parameter with either its name or its index.
If you use the name, you should pass `sym` in the constructor,
and the name you specified in the key of `args_lr_mult` should match
the name of the parameter in `sym`. If you use the index, it should
correspond to the index of the parameter used in the `update` method.
Specifying a parameter by its index is only supported for backward
compatibility, and we recommend to use the name instead.
"""
self.lr_mult = {}
if self.sym_info:
attr, arg_names = self.sym_info
for name in arg_names:
if name in attr and '__lr_mult__' in attr[name]:
self.lr_mult[name] = float(attr[name]['__lr_mult__'])
self.lr_mult.update(args_lr_mult)
def set_wd_mult(self, args_wd_mult):
"""Sets an individual weight decay multiplier for each parameter.
By default, if `param_idx2name` was provided in the
constructor, the weight decay multipler is set as 0 for all
parameters whose name don't end with ``_weight`` or
``_gamma``.
.. note:: The default weight decay multiplier for a `Variable`
can be set with its `wd_mult` argument in the constructor.
Parameters
----------
args_wd_mult : dict of string/int to float
For each of its key-value entries, the weight decay multipler for the
parameter specified in the key will be set as the given value.
You can specify the parameter with either its name or its index.
If you use the name, you should pass `sym` in the constructor,
and the name you specified in the key of `args_lr_mult` should match
the name of the parameter in `sym`. If you use the index, it should
correspond to the index of the parameter used in the `update` method.
Specifying a parameter by its index is only supported for backward
compatibility, and we recommend to use the name instead.
"""
self.wd_mult = {}
for n in self.idx2name.values():
if not (n.endswith('_weight') or n.endswith('_gamma')):
self.wd_mult[n] = 0.0
if self.sym_info:
attr, arg_names = self.sym_info
for name in arg_names:
if name in attr and '__wd_mult__' in attr[name]:
self.wd_mult[name] = float(attr[name]['__wd_mult__'])
self.wd_mult.update(args_wd_mult)
def _update_count(self, index):
"""Updates num_update.
Parameters
----------
index : int
The index to be updated.
"""
if index not in self._index_update_count:
self._index_update_count[index] = self.begin_num_update
self._index_update_count[index] += 1
self.num_update = max(self._index_update_count[index], self.num_update)
def _get_lr(self, index):
"""Gets the learning rate given the index of the weight.
Parameters
----------
index : int
The index corresponding to the weight.
Returns
-------
lr : float
Learning rate for this index.
"""
if self.lr_scheduler is not None:
lr = self.lr_scheduler(self.num_update)
else:
lr = self.lr
if index in self.param_dict:
lr *= self.param_dict[index].lr_mult
elif index in self.lr_mult:
lr *= self.lr_mult[index]
elif index in self.idx2name:
lr *= self.lr_mult.get(self.idx2name[index], 1.0)
return lr
def _get_wd(self, index):
"""Gets weight decay for index.
Returns 0 for non-weights if the name of weights are provided for `__init__`.
Parameters
----------
index : int
The index for weight.
Returns
-------
wd : float
Weight decay for this index.
"""
wd = self.wd
if index in self.param_dict:
wd *= self.param_dict[index].wd_mult
elif index in self.wd_mult:
wd *= self.wd_mult[index]
elif index in self.idx2name:
wd *= self.wd_mult.get(self.idx2name[index], 1.0)
return wd
# convenience wrapper for Optimizer.Register
register = Optimizer.register # pylint: disable=invalid-name
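# Illustrative sketch (not part of the original source): a minimal custom
# optimizer built on the hooks documented above.  It assumes plain dense
# NDArray weights and ignores multi_precision.
#
#   @register
#   class PlainSGD(Optimizer):
#       def create_state(self, index, weight):
#           return None                      # no auxiliary state needed
#       def update(self, index, weight, grad, state):
#           self._update_count(index)
#           lr, wd = self._get_lr(index), self._get_wd(index)
#           weight[:] -= lr * (grad * self.rescale_grad + wd * weight)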
# pylint: disable=line-too-long
@register
class SGD(Optimizer):
"""The SGD optimizer with momentum and weight decay.
If the storage types of weight and grad are both ``row_sparse``, and ``lazy_update`` is True, \
**lazy updates** are applied by::
for row in grad.indices:
rescaled_grad[row] = lr * rescale_grad * clip(grad[row], clip_gradient) + wd * weight[row]
state[row] = momentum[row] * state[row] + rescaled_grad[row]
weight[row] = weight[row] - state[row]
The sparse update only updates the momentum for the weights whose row_sparse
gradient indices appear in the current batch, rather than updating it for all
indices. Compared with the original update, it can provide large
improvements in model training throughput for some applications. However, it
provides slightly different semantics than the original update, and
may lead to different empirical results.
Otherwise, **standard updates** are applied by::
rescaled_grad = lr * rescale_grad * clip(grad, clip_gradient) + wd * weight
state = momentum * state + rescaled_grad
weight = weight - state
For details of the update algorithm see
:class:`~mxnet.ndarray.sgd_update` and :class:`~mxnet.ndarray.sgd_mom_update`.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
momentum : float, optional
The momentum value.
lazy_update : bool, optional
Default is True. If True, lazy updates are applied \
if the storage types of weight and grad are both ``row_sparse``.
multi_precision: bool, optional
Flag to control the internal precision of the optimizer.
``False`` results in using the same precision as the weights (default),
``True`` makes internal 32-bit copy of the weights and applies gradients \
in 32-bit precision even if actual weights used in the model have lower precision.\
Turning this on can improve convergence and accuracy when training with float16.
"""
def __init__(self, momentum=0.0, lazy_update=True, **kwargs):
super(SGD, self).__init__(**kwargs)
self.momentum = momentum
self.lazy_update = lazy_update
def create_state_multi_precision(self, index, weight):
weight_master_copy = None
if self.multi_precision and weight.dtype == numpy.float16:
weight_master_copy = weight.astype(numpy.float32)
return (self.create_state(index, weight_master_copy), weight_master_copy)
if weight.dtype == numpy.float16 and not self.multi_precision:
warnings.warn("Accumulating with float16 in optimizer can lead to "
"poor accuracy or slow convergence. "
"Consider using multi_precision=True option of the "
"SGD optimizer")
return self.create_state(index, weight)
def create_state(self, index, weight):
momentum = None
stype = weight.stype if self.lazy_update else 'default'
if self.momentum != 0.0:
momentum = zeros(weight.shape, weight.context, dtype=weight.dtype, stype=stype)
return momentum
def _update_impl(self, index, weight, grad, state, multi_precision=False):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
kwargs = {'rescale_grad': self.rescale_grad}
if self.momentum > 0:
kwargs['momentum'] = self.momentum
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
if not multi_precision:
if state is not None:
sgd_mom_update(weight, grad, state, out=weight,
lr=lr, wd=wd, **kwargs)
else:
sgd_update(weight, grad, out=weight, lazy_update=self.lazy_update,
lr=lr, wd=wd, **kwargs)
else:
if state[0] is not None:
mp_sgd_mom_update(weight, grad, state[0], state[1], out=weight,
lr=lr, wd=wd, **kwargs)
else:
mp_sgd_update(weight, grad, state[1], out=weight,
lr=lr, wd=wd, **kwargs)
def update(self, index, weight, grad, state):
self._update_impl(index, weight, grad, state, multi_precision=False)
def update_multi_precision(self, index, weight, grad, state):
use_multi_precision = self.multi_precision and weight.dtype == numpy.float16
self._update_impl(index, weight, grad, state,
multi_precision=use_multi_precision)
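# Illustrative sketch (not part of the original source): one hand-driven SGD
# step.  'mx' is assumed to be an imported mxnet module and index 0 is an
# arbitrary parameter index.
#   >>> opt = SGD(learning_rate=0.1, momentum=0.9, wd=1e-4)
#   >>> weight = mx.nd.ones((2, 2))
#   >>> grad = mx.nd.ones((2, 2)) * 0.5
#   >>> state = opt.create_state(0, weight)
#   >>> opt.update(0, weight, grad, state)     # weight updated in place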
@register
class Signum(Optimizer):
"""The Signum optimizer that takes the sign of gradient or momentum.
The optimizer updates the weight by:
rescaled_grad = rescale_grad * clip(grad, clip_gradient) + wd * weight
state = momentum * state + (1-momentum)*rescaled_grad
weight = (1 - lr * wd_lh) * weight - lr * sign(state)
See the original paper at: https://jeremybernste.in/projects/amazon/signum.pdf
For details of the update algorithm see
:class:`~mxnet.ndarray.signsgd_update` and :class:`~mxnet.ndarray.signum_update`.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
momentum : float, optional
The momentum value.
wd_lh : float, optional
The amount of decoupled weight decay regularization, see details in the original paper at:\
https://arxiv.org/abs/1711.05101
"""
def __init__(self, learning_rate=0.01, momentum=0.9, wd_lh=0.0, **kwargs):
super(Signum, self).__init__(learning_rate=learning_rate, **kwargs)
self.momentum = momentum
self.wd_lh = wd_lh
def create_state(self, index, weight):
momentum = None
if self.momentum != 0.0:
momentum = zeros(weight.shape, weight.context, dtype=weight.dtype, stype=weight.stype)
return momentum
def _update_impl(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
kwargs = {'rescale_grad': self.rescale_grad}
if self.momentum > 0:
kwargs['momentum'] = self.momentum
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
if self.wd_lh:
kwargs['wd_lh'] = self.wd_lh
if state is not None:
signum_update(weight, grad, state, out=weight,
lr=lr, wd=wd, **kwargs)
else:
signsgd_update(weight, grad, out=weight,
lr=lr, wd=wd, **kwargs)
def update(self, index, weight, grad, state):
self._update_impl(index, weight, grad, state)
@register
class FTML(Optimizer):
"""The FTML optimizer.
This class implements the optimizer described in
*FTML - Follow the Moving Leader in Deep Learning*,
available at http://proceedings.mlr.press/v70/zheng17a/zheng17a.pdf.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
beta1 : float, optional
0 < beta1 < 1. Generally close to 0.5.
beta2 : float, optional
0 < beta2 < 1. Generally close to 1.
epsilon : float, optional
Small value to avoid division by 0.
"""
def __init__(self, beta1=0.6, beta2=0.999, epsilon=1e-8, **kwargs):
super(FTML, self).__init__(**kwargs)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context, dtype=weight.dtype), # d_0
zeros(weight.shape, weight.context, dtype=weight.dtype), # v_0
zeros(weight.shape, weight.context, dtype=weight.dtype)) # z_0
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
t = self._index_update_count[index]
kwargs = {'beta1': self.beta1, 'beta2': self.beta2, 'epsilon': self.epsilon,
'rescale_grad': self.rescale_grad, 't': t}
if self.clip_gradient:
kwargs['clip_grad'] = self.clip_gradient
prev_d, prev_v, prev_z = state
ftml_update(weight, grad, prev_d, prev_v, prev_z, out=weight,
lr=lr, wd=wd, **kwargs)
@register
class LBSGD(Optimizer):
"""The Large Batch SGD optimizer with momentum and weight decay.
The optimizer updates the weight by::
state = momentum * state + lr * rescale_grad * clip(grad, clip_gradient) + wd * weight
weight = weight - state
For details of the update algorithm see :class:`~mxnet.ndarray.lbsgd_update` and
:class:`~mxnet.ndarray.lbsgd_mom_update`.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
momentum : float, optional
The momentum value.
multi_precision: bool, optional
Flag to control the internal precision of the optimizer.
``False`` results in using the same precision as the weights (default),
``True`` makes internal 32-bit copy of the weights and applies gradients
        in 32-bit precision even if actual weights used in the model have lower precision.
Turning this on can improve convergence and accuracy when training with float16.
    warmup_strategy: string ('linear', 'power2', 'sqrt', or 'lars'; default: 'linear')
    warmup_epochs: unsigned, default: 5
    batch_scale: unsigned, default: 1 (same as batch size * numworkers)
    updates_per_epoch: unsigned, default: 32 (may not reflect the true number of batches per epoch; used for warmup)
    begin_epoch: unsigned, default: 0, starting epoch.
"""
def __init__(self, momentum=0.0, multi_precision=False, warmup_strategy='linear',
warmup_epochs=5, batch_scale=1, updates_per_epoch=32, begin_epoch=0, num_epochs=60,
**kwargs):
super(LBSGD, self).__init__(**kwargs)
logging.info('Running Large-Batch SGD Algorithm')
logging.info('(Batch_scale=%f, warmup_epochs=%d, warmup_strategy=%s, updates_per_epoch=%d)',
batch_scale, warmup_epochs, warmup_strategy, updates_per_epoch)
self.momentum = momentum
self.multi_precision = multi_precision
# new user parameters for large batch
self.warmup_strategy = warmup_strategy
self.warmup_epochs = warmup_epochs
self.batch_scale = batch_scale
self.updates_per_epoch = updates_per_epoch
self.init_updates = begin_epoch * updates_per_epoch
self.num_epochs = num_epochs
# addl internal usage parameters and storage
self.lbmult = 1
self.cumgrads = {}
# for adaptive lr
self.adaptive = False
self.admult = 1 # adaptation constant
def create_state(self, index, weight):
momentum = None
weight_master_copy = None
if self.multi_precision and weight.dtype == numpy.float16:
weight_master_copy = array(weight, ctx=weight.context, dtype=numpy.float32)
if self.momentum != 0.0:
momentum = zeros(weight.shape, weight.context, dtype=numpy.float32,
stype=weight.stype)
return (momentum, weight_master_copy)
if weight.dtype == numpy.float16 and not self.multi_precision:
warnings.warn("Accumulating with float16 in optimizer can lead to "
"poor accuracy or slow convergence. "
"Consider using multi_precision=True option of the "
"SGD optimizer")
if self.momentum != 0.0:
momentum = zeros(weight.shape, weight.context, dtype=weight.dtype, stype=weight.stype)
return momentum
def _get_lbmult(self, nup):
"""Returns lr scaling factor for large batch according to warmup schedule
(to be implemented)
"""
nwup = self.warmup_epochs * self.updates_per_epoch
strategy = self.warmup_strategy
maxmult = float(self.batch_scale)
if nup >= nwup:
mult = maxmult
elif nwup <= 1:
mult = 1.0
else:
if (strategy == 'linear'):
mult = 1.0 + (maxmult - 1) * nup / nwup
elif (strategy == 'power2'):
mult = 1.0 + (maxmult-1) * (nup*nup)/(nwup*nwup)
elif (strategy == 'sqrt'):
mult = 1.0 + (maxmult - 1) * math.sqrt(float(nup) / nwup)
else:
mult = 1.0
return mult
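    # Worked example (not part of the original source): with warmup_epochs=5,
    # updates_per_epoch=32 and batch_scale=16, nwup = 160, so the 'linear'
    # strategy returns 1 + 15 * nup / 160, i.e. the multiplier grows from
    # roughly 1 at the first update up to 16 once warmup finishes.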
def _get_lars(self, weight, g, wd):
"""Returns a scaling factor for the learning rate for this layer
default is 1
"""
weight2 = self._l2norm(weight)
grad2 = self._l2norm(g)
lars = math.sqrt(weight2 / (grad2 + wd * weight2 + 1e-18))
if lars < 0.01:
lars = 0.01
elif lars > 100:
lars = 100
return lars
    def _l2norm(self, v):
        "returns the squared L2 norm of v (inner product of v with itself)"
norm = multiply(v, v).asnumpy().sum()
return norm
def _reset_cum_gradient(self, index):
"called every macro-batch to reset cumulated gradients to 0 for a given index"
self.cumgrads[index]['cum_grad'] = 0
def _get_cum_gradient(self, index):
"get the cumulated gradient for index"
if index in self.cumgrads:
return self.cumgrads[index]
else:
return {}
def _put_cum_gradient(self, index, cgrad):
"store cumulated gradient for index"
self.cumgrads[index] = cgrad
def _cumulate_gradient(self, grad, index):
"Cumulate gradients for large-batch emulation. Cumulated by index (layer)"
cgrad = self._get_cum_gradient(index)
if cgrad:
num_cums = cgrad['num_cums']
if num_cums > 0:
cum_grad = cgrad['cum_grad'] + grad
num_cums += 1
else:
cum_grad = grad
num_cums = self.init_updates + 1
else:
cum_grad = grad
num_cums = self.init_updates + 1
cgrad = {'cum_grad': cum_grad, 'num_cums': num_cums}
self._put_cum_gradient(index, cgrad)
return cgrad
def update(self, index, weight, grad, state):
assert (isinstance(weight, NDArray))
assert (isinstance(grad, NDArray))
lr = self._get_lr(index)
wd = self._get_wd(index)
self._update_count(index)
# new stuff for large batch
cgrad = self._cumulate_gradient(grad, index)
if (cgrad['num_cums'] % self.batch_scale) == 0:
grad = cgrad['cum_grad'] / self.batch_scale
if self.warmup_strategy == 'lars':
lbmult = self._get_lars(weight, grad, wd)
else:
lbmult = self._get_lbmult(cgrad['num_cums'])
lr = lr * lbmult
# do the regular sgd update flow
kwargs = {'rescale_grad': self.rescale_grad}
if self.momentum > 0:
kwargs['momentum'] = self.momentum
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
use_multi_precision = isinstance(state, (list, tuple))
if not use_multi_precision:
if state is not None:
sgd_mom_update(weight, grad, state, out=weight, lr=lr, wd=wd, **kwargs)
else:
sgd_update(weight, grad, out=weight, lr=lr, wd=wd, **kwargs)
else:
if state[0] is not None:
mp_sgd_mom_update(weight, grad, state[0], state[1], out=weight, lr=lr, wd=wd,
**kwargs)
else:
mp_sgd_update(weight, grad, state[1], out=weight, lr=lr, wd=wd, **kwargs)
# reset update count and cumulated gradient per large batch
self._reset_cum_gradient(index)
else:
lr = 0.0
kwargs = {}
sgd_update(weight, grad, out=weight, lr=lr, wd=wd, **kwargs)
# pylint: enable=line-too-long
@register
class DCASGD(Optimizer):
"""The DCASGD optimizer.
This class implements the optimizer described in *Asynchronous Stochastic Gradient Descent
with Delay Compensation for Distributed Deep Learning*,
available at https://arxiv.org/abs/1609.08326.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
momentum : float, optional
The momentum value.
lamda : float, optional
Scale DC value.
"""
def __init__(self, momentum=0.0, lamda=0.04, **kwargs):
super(DCASGD, self).__init__(**kwargs)
self.momentum = momentum
self.weight_previous = {}
self.lamda = lamda
def create_state(self, index, weight):
if self.momentum == 0.0:
return (None,
weight.copy()) # previous weight
else:
return (zeros(weight.shape, weight.context, dtype=weight.dtype), # momentum
weight.copy()) # previous weight
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
grad = grad * self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
mom, previous_weight = state
        if mom is not None:
mom[:] *= self.momentum
mom[:] += -lr * (grad + wd * weight + self.lamda \
* grad * grad * (weight - previous_weight))
else:
assert(self.momentum == 0.0)
mom = -lr * (grad + wd * weight + self.lamda \
* grad * grad * (weight - previous_weight))
previous_weight[:] = weight
weight[:] += mom
@register
class NAG(Optimizer):
"""Nesterov accelerated SGD.
This optimizer updates each weight by::
state = momentum * state + grad + wd * weight
weight = weight - (lr * (grad + momentum * state))
Parameters
----------
momentum : float, optional
The momentum value.
multi_precision: bool, optional
Flag to control the internal precision of the optimizer.
``False`` results in using the same precision as the weights (default),
``True`` makes internal 32-bit copy of the weights and applies gradients \
in 32-bit precision even if actual weights used in the model have lower precision.\
Turning this on can improve convergence and accuracy when training with float16.
"""
def __init__(self, momentum=0.0, **kwargs):
super(NAG, self).__init__(**kwargs)
self.momentum = momentum
def create_state(self, index, weight):
momentum = None
if self.momentum != 0.0:
momentum = zeros(weight.shape, weight.context, dtype=weight.dtype)
return momentum
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
grad = grad * self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
if state is not None:
mom = state
mom[:] *= self.momentum
grad += wd * weight
mom[:] += grad
grad[:] += self.momentum * mom
weight[:] += -lr * grad
else:
assert self.momentum == 0.0
weight[:] += -lr * (grad + wd * weight)
@register
class SGLD(Optimizer):
"""Stochastic Gradient Riemannian Langevin Dynamics.
This class implements the optimizer described in the paper *Stochastic Gradient
Riemannian Langevin Dynamics on the Probability Simplex*, available at
https://papers.nips.cc/paper/4883-stochastic-gradient-riemannian-langevin-dynamics-on-the-probability-simplex.pdf.
"""
def __init__(self, **kwargs):
super(SGLD, self).__init__(**kwargs)
def create_state(self, index, weight):
return None
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
grad = grad * self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
weight[:] += - lr/2 * (grad + wd * weight) + normal(0, math.sqrt(lr), shape=weight.shape,
dtype=weight.dtype, ctx=weight.context)
@register # pylint: disable=invalid-name
class ccSGD(SGD):
"""[DEPRECATED] Same as `SGD`. Left here for backward compatibility."""
def __init__(self, *args, **kwargs):
super(ccSGD, self).__init__(*args, **kwargs)
@register
class Adam(Optimizer):
"""The Adam optimizer.
This class implements the optimizer described in *Adam: A Method for
Stochastic Optimization*, available at http://arxiv.org/abs/1412.6980.
If the storage types of weight and grad are both ``row_sparse``, and ``lazy_update`` is True, \
**lazy updates** are applied by::
for row in grad.indices:
rescaled_grad[row] = clip(grad[row] * rescale_grad + wd * weight[row], clip_gradient)
m[row] = beta1 * m[row] + (1 - beta1) * rescaled_grad[row]
v[row] = beta2 * v[row] + (1 - beta2) * (rescaled_grad[row]**2)
w[row] = w[row] - learning_rate * m[row] / (sqrt(v[row]) + epsilon)
The lazy update only updates the mean and var for the weights whose row_sparse
gradient indices appear in the current batch, rather than updating it for all indices.
Compared with the original update, it can provide large improvements in model training
throughput for some applications. However, it provides slightly different semantics than
the original update, and may lead to different empirical results.
Otherwise, **standard updates** are applied by::
rescaled_grad = clip(grad * rescale_grad + wd * weight, clip_gradient)
m = beta1 * m + (1 - beta1) * rescaled_grad
v = beta2 * v + (1 - beta2) * (rescaled_grad**2)
w = w - learning_rate * m / (sqrt(v) + epsilon)
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
For details of the update algorithm, see :class:`~mxnet.ndarray.adam_update`.
Parameters
----------
beta1 : float, optional
Exponential decay rate for the first moment estimates.
beta2 : float, optional
Exponential decay rate for the second moment estimates.
epsilon : float, optional
Small value to avoid division by 0.
lazy_update : bool, optional
Default is True. If True, lazy updates are applied \
if the storage types of weight and grad are both ``row_sparse``.
"""
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
lazy_update=True, **kwargs):
super(Adam, self).__init__(learning_rate=learning_rate, **kwargs)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.lazy_update = lazy_update
def create_state(self, index, weight):
stype = weight.stype if self.lazy_update else 'default'
return (zeros(weight.shape, weight.context, dtype=weight.dtype,
stype=stype), # mean
zeros(weight.shape, weight.context, dtype=weight.dtype,
stype=stype)) # variance
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
t = self._index_update_count[index]
coef1 = 1. - self.beta1**t
coef2 = 1. - self.beta2**t
lr *= math.sqrt(coef2)/coef1
kwargs = {'beta1': self.beta1, 'beta2': self.beta2, 'epsilon': self.epsilon,
'rescale_grad': self.rescale_grad}
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
mean, var = state
adam_update(weight, grad, mean, var, out=weight,
lr=lr, wd=wd, **kwargs)
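# Illustrative sketch (not part of the original source): Adam keeps a (mean,
# variance) pair per parameter and rescales the step by sqrt(1 - beta2**t) /
# (1 - beta1**t) before calling adam_update.  'mx' is an imported mxnet module.
#   >>> opt = Adam(learning_rate=0.001)
#   >>> weight = mx.nd.ones((3,))
#   >>> state = opt.create_state(0, weight)        # (mean, var), both zeros
#   >>> opt.update(0, weight, mx.nd.ones((3,)), state)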
@register
class AdaGrad(Optimizer):
"""AdaGrad optimizer.
This class implements the AdaGrad optimizer described in *Adaptive Subgradient
Methods for Online Learning and Stochastic Optimization*, and available at
http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
See Also
----------
:meth:`mxnet.ndarray.sparse.adagrad_update`.
Parameters
----------
eps: float, optional
Small value to avoid division by 0.
"""
def __init__(self, eps=1e-7, **kwargs):
super(AdaGrad, self).__init__(**kwargs)
self.float_stable_eps = eps
def create_state(self, index, weight):
return zeros(weight.shape, weight.context, stype=weight.stype) # history
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
is_sparse = weight.stype == 'row_sparse' and grad.stype == 'row_sparse'
history = state
if is_sparse:
kwargs = {'epsilon': self.float_stable_eps,
'rescale_grad': self.rescale_grad}
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
sparse.adagrad_update(weight, grad, history, out=weight, lr=lr, wd=wd, **kwargs)
else:
grad = grad * self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
history[:] += square(grad)
div = grad / sqrt(history + self.float_stable_eps)
weight[:] += (div + weight * wd) * -lr
@register
class RMSProp(Optimizer):
"""The RMSProp optimizer.
Two versions of RMSProp are implemented:
If ``centered=False``, we follow
http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf by
Tieleman & Hinton, 2012.
For details of the update algorithm see :class:`~mxnet.ndarray.rmsprop_update`.
If ``centered=True``, we follow http://arxiv.org/pdf/1308.0850v5.pdf (38)-(45)
by Alex Graves, 2013.
For details of the update algorithm see :class:`~mxnet.ndarray.rmspropalex_update`.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
gamma1: float, optional
A decay factor of moving average over past squared gradient.
gamma2: float, optional
A "momentum" factor. Only used if `centered`=``True``.
epsilon : float, optional
Small value to avoid division by 0.
centered : bool, optional
Flag to control which version of RMSProp to use.
``True`` will use Graves's version of `RMSProp`,
``False`` will use Tieleman & Hinton's version of `RMSProp`.
clip_weights : float, optional
Clips weights into range ``[-clip_weights, clip_weights]``.
"""
def __init__(self, learning_rate=0.001, gamma1=0.9, gamma2=0.9,
epsilon=1e-8, centered=False, clip_weights=None, **kwargs):
super(RMSProp, self).__init__(learning_rate=learning_rate, **kwargs)
self.gamma1 = gamma1
self.gamma2 = gamma2
self.centered = centered
self.epsilon = epsilon
self.clip_weights = clip_weights
def create_state(self, index, weight):
if self.centered:
return (
zeros(weight.shape, weight.context, stype=weight.stype), # n
zeros(weight.shape, weight.context, stype=weight.stype), # g
zeros(weight.shape, weight.context, stype=weight.stype)) # delta
else:
return (zeros(weight.shape, weight.context, stype=weight.stype),) # n
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
kwargs = {'gamma1': self.gamma1, 'epsilon': self.epsilon,
'rescale_grad': self.rescale_grad}
if self.centered:
kwargs['gamma2'] = self.gamma2
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
if self.clip_weights:
kwargs['clip_weights'] = self.clip_weights
if not self.centered:
(n, ) = state
rmsprop_update(
weight, grad, n, out=weight, lr=lr, wd=wd, **kwargs)
else:
n, g, delta = state
rmspropalex_update(weight, grad, n, g, delta, out=weight,
lr=lr, wd=wd, **kwargs)
@register
class AdaDelta(Optimizer):
"""The AdaDelta optimizer.
This class implements AdaDelta, an optimizer described in *ADADELTA: An adaptive
learning rate method*, available at https://arxiv.org/abs/1212.5701.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
rho: float
Decay rate for both squared gradients and delta.
epsilon : float
Small value to avoid division by 0.
"""
def __init__(self, rho=0.90, epsilon=1e-5, **kwargs):
super(AdaDelta, self).__init__(**kwargs)
self.rho = rho
self.epsilon = epsilon
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context), # accumulated g
zeros(weight.shape, weight.context)) # accumulated delta
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
wd = self._get_wd(index)
self._update_count(index)
# preprocess grad
grad *= self.rescale_grad
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
# accumulated g and delta initlization
acc_g, acc_delta = state
# update g, delta
acc_g[:] = self.rho * acc_g + (1. - self.rho) * grad * grad
current_delta = sqrt(acc_delta + self.epsilon) / sqrt(acc_g + self.epsilon) * grad
acc_delta[:] = self.rho * acc_delta + (1. - self.rho) * current_delta * current_delta
# update weight
weight[:] -= current_delta + wd * weight
#pylint: disable=invalid-name
#pylint: disable=line-too-long
@register
class Ftrl(Optimizer):
"""The Ftrl optimizer.
Referenced from *Ad Click Prediction: a View from the Trenches*, available at
http://dl.acm.org/citation.cfm?id=2488200.
eta :
.. math::
\\eta_{t,i} = \\frac{learningrate}{\\beta+\\sqrt{\\sum_{s=1}^tg_{s,i}^2}}
The optimizer updates the weight by::
rescaled_grad = clip(grad * rescale_grad, clip_gradient)
z += rescaled_grad - (sqrt(n + rescaled_grad**2) - sqrt(n)) * weight / learning_rate
n += rescaled_grad**2
w = (sign(z) * lamda1 - z) / ((beta + sqrt(n)) / learning_rate + wd) * (abs(z) > lamda1)
If the storage types of weight, state and grad are all ``row_sparse``, \
**sparse updates** are applied by::
for row in grad.indices:
rescaled_grad[row] = clip(grad[row] * rescale_grad, clip_gradient)
z[row] += rescaled_grad[row] - (sqrt(n[row] + rescaled_grad[row]**2) - sqrt(n[row])) * weight[row] / learning_rate
n[row] += rescaled_grad[row]**2
w[row] = (sign(z[row]) * lamda1 - z[row]) / ((beta + sqrt(n[row])) / learning_rate + wd) * (abs(z[row]) > lamda1)
The sparse update only updates the z and n for the weights whose row_sparse
gradient indices appear in the current batch, rather than updating it for all
indices. Compared with the original update, it can provide large
improvements in model training throughput for some applications. However, it
provides slightly different semantics than the original update, and
may lead to different empirical results.
For details of the update algorithm, see :class:`~mxnet.ndarray.ftrl_update`.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
lamda1 : float, optional
L1 regularization coefficient.
learning_rate : float, optional
The initial learning rate.
beta : float, optional
Per-coordinate learning rate correlation parameter.
"""
def __init__(self, lamda1=0.01, learning_rate=0.1, beta=1, **kwargs):
super(Ftrl, self).__init__(**kwargs)
self.lamda1 = lamda1
self.beta = beta
self.lr = learning_rate
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context, stype=weight.stype), # z
zeros(weight.shape, weight.context, stype=weight.stype)) # n
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
wd = self._get_wd(index)
lr = self._get_lr(index)
kwargs = {'lamda1': self.lamda1, 'beta': self.beta, 'rescale_grad': self.rescale_grad}
if self.clip_gradient:
kwargs['clip_gradient'] = self.clip_gradient
# accumulated g and delta initialization
z, n = state
ftrl_update(weight, grad, z, n, out=weight,
lr=lr, wd=wd, **kwargs)
# pylint: enable=line-too-long
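# Illustrative sketch (not part of the original source): with the default
# lamda1=0.01, coordinates whose accumulated |z| stays at or below lamda1 are
# clamped to exactly zero by the update above, which is what makes Ftrl
# useful for learning sparse weights.  'mx' is an imported mxnet module.
#   >>> opt = Ftrl(lamda1=0.01, learning_rate=0.1)
#   >>> weight = mx.nd.zeros((4,))
#   >>> z_n = opt.create_state(0, weight)          # (z, n), both zeros
#   >>> opt.update(0, weight, mx.nd.array([1e-4, 0, 0, 0]), z_n)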
@register
class Adamax(Optimizer):
"""The AdaMax optimizer.
It is a variant of Adam based on the infinity norm
available at http://arxiv.org/abs/1412.6980 Section 7.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
beta1 : float, optional
Exponential decay rate for the first moment estimates.
beta2 : float, optional
Exponential decay rate for the second moment estimates.
"""
def __init__(self, learning_rate=0.002, beta1=0.9, beta2=0.999, **kwargs):
super(Adamax, self).__init__(learning_rate=learning_rate, **kwargs)
self.beta1 = beta1
self.beta2 = beta2
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context, dtype=weight.dtype), # mean
zeros(weight.shape, weight.context, dtype=weight.dtype)) # variance
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
t = self._index_update_count[index]
lr /= (1. - self.beta1**t)
# preprocess grad
grad = grad * self.rescale_grad + wd * weight
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
# update m_t and u_t
m_t, u_t = state
m_t[:] = self.beta1 * m_t + (1. - self.beta1) * grad
u_t[:] = maximum(self.beta2 * u_t, NDabs(grad))
# update weight
weight[:] -= lr * m_t / u_t
@register
class Nadam(Optimizer):
"""The Nesterov Adam optimizer.
Much like Adam is essentially RMSprop with momentum,
Nadam is Adam RMSprop with Nesterov momentum available
at http://cs229.stanford.edu/proj2015/054_report.pdf.
This optimizer accepts the following parameters in addition to those accepted
by :class:`.Optimizer`.
Parameters
----------
beta1 : float, optional
Exponential decay rate for the first moment estimates.
beta2 : float, optional
Exponential decay rate for the second moment estimates.
epsilon : float, optional
Small value to avoid division by 0.
schedule_decay : float, optional
Exponential decay rate for the momentum schedule
"""
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
schedule_decay=0.004, **kwargs):
super(Nadam, self).__init__(learning_rate=learning_rate, **kwargs)
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.schedule_decay = schedule_decay
self.m_schedule = 1.
def create_state(self, index, weight):
return (zeros(weight.shape, weight.context, dtype=weight.dtype), # mean
zeros(weight.shape, weight.context, dtype=weight.dtype)) # variance
def update(self, index, weight, grad, state):
assert(isinstance(weight, NDArray))
assert(isinstance(grad, NDArray))
self._update_count(index)
lr = self._get_lr(index)
wd = self._get_wd(index)
t = self._index_update_count[index]
# preprocess grad
grad = grad * self.rescale_grad + wd * weight
if self.clip_gradient is not None:
grad = clip(grad, -self.clip_gradient, self.clip_gradient)
# warming momentum schedule
momentum_t = self.beta1 * (1. - 0.5 * (pow(0.96, t * self.schedule_decay)))
momentum_t_1 = self.beta1 * (1. - 0.5 * (pow(0.96, (t + 1) * self.schedule_decay)))
self.m_schedule = self.m_schedule * momentum_t
m_schedule_next = self.m_schedule * momentum_t_1
# update m_t and v_t
m_t, v_t = state
m_t[:] = self.beta1 * m_t + (1. - self.beta1) * grad
v_t[:] = self.beta2 * v_t + (1. - self.beta2) * grad * grad
grad_prime = grad / (1. - self.m_schedule)
m_t_prime = m_t / (1. - m_schedule_next)
v_t_prime = v_t / (1. - pow(self.beta2, t))
m_t_bar = (1. - momentum_t) * grad_prime + momentum_t_1 * m_t_prime
# update weight
weight[:] -= lr * m_t_bar / (sqrt(v_t_prime) + self.epsilon)
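# Worked example (not part of the original source): with the defaults
# beta1=0.9 and schedule_decay=0.004, the first momentum schedule value is
# momentum_t = 0.9 * (1 - 0.5 * 0.96**(1 * 0.004)) ~= 0.45, and it creeps
# toward beta1 as t grows, which is the "warming" referred to above.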
@register
class Test(Optimizer):
"""The Test optimizer"""
def __init__(self, **kwargs):
super(Test, self).__init__(**kwargs)
def create_state(self, index, weight):
"""Creates a state to duplicate weight."""
return zeros(weight.shape, weight.context)
def update(self, index, weight, grad, state):
"""Performs w += rescale_grad * grad."""
weight[:] += grad * self.rescale_grad
state[:] = weight
# backward compatibility wrapper for Optimizer.CreateOptimizer
create = Optimizer.create_optimizer # pylint: disable=invalid-name
class Updater(object):
"""Updater for kvstore."""
def __init__(self, optimizer):
self.optimizer = optimizer
self.states = {}
self.states_synced = {}
def __call__(self, index, grad, weight):
"""Updates weight given gradient and index."""
# convert ctypes.char_p.value back to python str if needed
if isinstance(index, bytes):
index = py_str(index)
if index not in self.states:
self.states[index] = self.optimizer.create_state_multi_precision(index, weight)
self.states_synced[index] = True
elif not self.states_synced[index]:
self.states[index] = \
self.sync_state_context(self.states[index], weight.context)
self.states_synced[index] = True
self.optimizer.update_multi_precision(index, weight, grad, self.states[index])
def sync_state_context(self, state, context):
"""sync state context."""
if isinstance(state, NDArray):
return state.as_in_context(context)
elif isinstance(state, (tuple, list)):
synced_state = (self.sync_state_context(i, context) for i in state)
if isinstance(state, tuple):
return tuple(synced_state)
else:
return list(synced_state)
else:
return state
def set_states(self, states):
"""Sets updater states."""
states = pickle.loads(states)
if isinstance(states, tuple) and len(states) == 2:
self.states, self.optimizer = states
else:
self.states = states
self.states_synced = dict.fromkeys(self.states.keys(), False)
def get_states(self, dump_optimizer=False):
"""Gets updater states.
Parameters
----------
dump_optimizer : bool, default False
Whether to also save the optimizer itself. This would also save optimizer
information such as learning rate and weight decay schedules.
"""
return pickle.dumps((self.states, self.optimizer) if dump_optimizer else self.states)
def get_updater(optimizer):
"""Returns a closure of the updater needed for kvstore.
Parameters
----------
optimizer: Optimizer
The optimizer.
Returns
-------
updater: function
The closure of the updater.
"""
return Updater(optimizer)
| apache-2.0 | 7,488,442,777,190,059,000 | 37.560895 | 129 | 0.600761 | false |
christopherbugg/python-scripts | seer2.py | 1 | 5183 | # seer2.py
# python 3.5
# Simple demo of super-basic neural-network weather prediction.
# This version is re-written and very cleaned-up.
# Chris Bugg
# Created: 2/1/15
# Updated: 3/23/17
# TODO: Add other things to track
# TODO: Add API to fetch WX data
# TODO: Create dynamic version
import random
class Seer:
# Multiplier models
rawlins_model = [.1]
laramie_model = [.1]
cheyenne_model = [.1]
# Training conditions
training_conditions_rawlins = [80]
training_conditions_laramie = [70]
training_conditions_cheyenne = [60]
# Training actual forecast
training_forecast = [75.0]
# Validation conditions
validation_conditions_rawlins = [60]
validation_conditions_laramie = [50]
validation_conditions_cheyenne = [40]
# Validation actual forecast
validation_forecast = [55.0]
# Predicted forecast
predicted_forecast = [10.0]
# Low chances, used to help randomness
low_chance = [0, 0, 0, 0.0001, -0.0001]
# Constructor
def __init__(self):
# Initial
self.initial()
# Training
self.training()
# Validation
self.validation()
# Update model values based on actual forecast
def update_model(self):
# If our prediction was too low, bump up model weights
if self.training_forecast[0] > self.predicted_forecast[0]:
self.rawlins_model[0] = self.rawlins_model[0] * 1.01 + random.choice(self.low_chance)
self.laramie_model[0] = self.laramie_model[0] * 1.01 + random.choice(self.low_chance)
self.cheyenne_model[0] = self.cheyenne_model[0] * 1.01 + random.choice(self.low_chance)
# If our prediction was too high, bump down model weights
elif self.training_forecast[0] < self.predicted_forecast[0]:
self.rawlins_model[0] = self.rawlins_model[0] * 0.99 + random.choice(self.low_chance)
self.laramie_model[0] = self.laramie_model[0] * 0.99 + random.choice(self.low_chance)
self.cheyenne_model[0] = self.cheyenne_model[0] * 0.99 + random.choice(self.low_chance)
# Make prediction based on model values
def training_predict(self):
self.predicted_forecast[0] = self.training_conditions_rawlins[0] * self.rawlins_model[0] + \
self.training_conditions_laramie[0] * self.laramie_model[0] + \
self.training_conditions_cheyenne[0] * self.cheyenne_model[0]
# Make prediction based on model values
def validation_predict(self):
self.predicted_forecast[0] = self.validation_conditions_rawlins[0] * self.rawlins_model[0] + \
self.validation_conditions_laramie[0] * self.laramie_model[0] + \
self.validation_conditions_cheyenne[0] * self.cheyenne_model[0]
# Make initial prediction based on initial values
def initial(self):
print("--Initial Run--")
# Print Current Conditions
print("Current Conditions: ")
print("Rawlins: " + str(self.training_conditions_rawlins))
print("Laramie: " + str(self.training_conditions_laramie))
print("Cheyenne: " + str(self.training_conditions_cheyenne))
# Print Predicted Forecast
print("Predicted Forecast Laramie: " + str(self.predicted_forecast))
# Print Actual Forecast
print("Actual Forecast Laramie: " + str(self.training_forecast))
# Train model based on training data
def training(self):
# Training
print("--Training...")
# Number times to train
iterations = 2000000
# Loop x times and train the model
for x in range(0, iterations):
# Updated model based on actual forecast
self.update_model()
# Update prediction values based on updated model
self.training_predict()
print("--Training Run--")
# Print Current Conditions
print("Current Conditions: ")
print("Rawlins: " + str(self.training_conditions_rawlins))
print("Laramie: " + str(self.training_conditions_laramie))
print("Cheyenne: " + str(self.training_conditions_cheyenne))
# Print Predicted Forecast
print("Predicted Forecast Laramie: " + str(self.predicted_forecast))
# Print Actual Forecast
print("Actual Forecast Laramie: " + str(self.training_forecast))
# Test models' behavior on new data
def validation(self):
# Perform Prediction based on trained model
self.validation_predict()
print("--Validation Run--")
# Print Current Conditions
print("Current Conditions: ")
print("Rawlins: " + str(self.validation_conditions_rawlins))
print("Laramie: " + str(self.validation_conditions_laramie))
print("Cheyenne: " + str(self.validation_conditions_cheyenne))
# Print Predicted Forecast
print("Predicted Forecast Laramie: " + str(self.predicted_forecast))
# Print Actual Forecast
print("Actual Forecast Laramie: " + str(self.validation_forecast))
Seer()
| mit | 8,330,008,287,202,172,000 | 31.803797 | 102 | 0.626278 | false |
CityOfPhiladelphia/curbside-geocoder | addrparse/addrparse.py | 1 | 25333 | """
Philadelphia Address Standardizer
Author: Tom Swanson
Created: 8/25/2014
Last Updated: 10/10/2014
Version: 1.0
"""
import csv, sys, os, time, math, re
from datetime import datetime
'''
CLASS DEFINITIONS
'''
class suffix:
def __init__(self, row):
# 0 - not a suffix
# 1 - standard suffix abbr
# 2 - long suffix
# 3 - common abbr
self.full = row[0]
self.common = row[1]
self.correct = row[2]
self.std = '3'
#self.notes = row[4]
if(self.common == self.full):
self.std = '2'
if(self.common == self.correct):
self.std = '1'
class addrnum:
def __init__(self):
self.addrnumlow =-1
self.addrnumhigh = -1
self.addrnumstrlow = ''
self.addrnumstrhigh = ''
self.oeb = ''
self.isaddr = False
class addr:
def __init__(self):
self.parsetype = ''
self.addr = addrnum()
self.predir = ''
self.streetname = ''
self.suffix = ''
self.postdir = ''
self.unitdesignator = ''
self.predir2 = ''
self.streetname2 = ''
self.suffix2 = ''
self.postdir2 = ''
self.unitdesignator2 = ''
class directional:
def __init__(self, row):
# 0 - not a dir
# 1 - abbr dir N,E,S,W
# 2 - long dir NORTH,EAST,SOUTH,WEST
self.full = row[0]
self.common = row[1]
self.correct = row[2]
self.std = '1'
#self.notes = row[4]
if(self.common == self.full):
self.std = '2'
class addordinal:
def __init__(self, row):
self.ordigit = row[0]
self.orsuffix = row[1]
class saint:
def __init__(self, row):
self.saintName = row[0]
class namestd:
def __init__(self, row):
self.correct = row[0]
self.common = row[1]
class apt:
def __init__(self, row):
self.correct = row[0]
self.common = row[1]
class apte:
def __init__(self, row):
self.correct = row[0]
self.common = row[1]
'''
SETUP FUNCTIONS
'''
def csv_path(file_name):
return os.path.join(cwd, file_name + '.csv')
def createSuffixLookup():
path = csv_path('suffix')
f = open(path, 'r')
lookup = {}
try:
reader = csv.reader(f)
for row in reader:
r = suffix(row)
lookup[r.common] = r
except:
print('Error opening ' + path, sys.exc_info()[0])
f.close()
return lookup
def createDirLookup():
path = csv_path('directional')
f = open(path, 'r')
lookup = {}
try:
reader = csv.reader(f)
for row in reader:
r = directional(row)
lookup[r.common] = r
except:
print('Error opening ' + path, sys.exc_info()[0])
f.close()
return lookup
def createOrdinalLookup():
lookup = {}
r = addordinal(['1','ST'])
lookup[r.ordigit] = r
r = addordinal(['2','ND'])
lookup[r.ordigit] = r
r = addordinal(['3','RD'])
lookup[r.ordigit] = r
r = addordinal(['4','TH'])
lookup[r.ordigit] = r
r = addordinal(['5','TH'])
lookup[r.ordigit] = r
r = addordinal(['6','TH'])
lookup[r.ordigit] = r
r = addordinal(['7','TH'])
lookup[r.ordigit] = r
r = addordinal(['8','TH'])
lookup[r.ordigit] = r
r = addordinal(['9','TH'])
lookup[r.ordigit] = r
r = addordinal(['0','TH'])
lookup[r.ordigit] = r
return lookup
def createSaintLookup():
path = csv_path('saint')
f = open(path, 'r')
lookup = {}
try:
reader = csv.reader(f)
for row in reader:
r = saint(row)
lookup[r.saintName] = r
except:
print('Error opening ' + path, sys.exc_info()[0])
f.close()
return lookup
def createNameStdLookup():
path = csv_path('std')
f = open(path, 'r')
lookup = {}
try:
reader = csv.reader(f)
for row in reader:
r = namestd(row)
lookup[r.common] = r
except:
print('Error opening ' + path, sys.exc_info()[0])
f.close()
return lookup
def createAptLookup():
path = csv_path('apt')
f = open(path, 'r')
lookup = {}
try:
reader = csv.reader(f)
for row in reader:
r = apt(row)
lookup[r.common] = r
except:
print('Error opening ' + path, sys.exc_info()[0])
f.close()
return lookup
def createApteLookup():
path = csv_path('apte')
f = open(path, 'r')
lookup = {}
try:
reader = csv.reader(f)
for row in reader:
r = apte(row)
lookup[r.common] = r
except:
print('Error opening ' + path, sys.exc_info()[0])
f.close()
return lookup
'''
TYPE TESTS
'''
def isSuffix(test):
# 0 - not a suffix
# 1 - standard suffix abbr
# 2 - long suffix
# 3 - common abbr
try:
suf = suffixLookup[test]
except KeyError:
row = [' ', test, ' ']
suf = suffix(row)
suf.std = '0'
return suf
def isDir(test):
# 0 - not a dir
# 1 - abbr dir N,E,S,W
# 2 - long dir NORTH,EAST,SOUTH,WEST
try:
dir = dirLookup[test]
except KeyError:
row = [' ', test, ' ']
dir = directional(row)
dir.std = '0'
return dir
def isSaint(test):
ret = True
try:
snt = saintLookup[test]
except KeyError:
ret = False
return ret
def isNameStd(test):
try:
nstd = nameStdLookup[test]
except KeyError:
row = ['', test]
nstd = namestd(row)
return nstd
def isApt(test):
try:
apttemp = aptLookup[test]
except KeyError:
row = ['',test]
apttemp = apt(row)
return apttemp
def isApte(test):
try:
aptetemp = apteLookup[test]
except KeyError:
row = ['',test]
aptetemp = apte(row)
return aptetemp
# Standardize names
def nameStd(tokens):
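    # Greedy longest-phrase standardization: slide a window of decreasing
    # length over the tokens and, when a phrase matches the street-name
    # standardization table, substitute the canonical form and blank out the
    # remaining tokens of the matched phrase. A leading numeric token gets an
    # ordinal suffix at the end (e.g. 1 -> 1ST, 2 -> 2ND).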
i = len(tokens)
j = 0
while (i>0):
j = 0
while (j+i<=len(tokens)):
nstd = isNameStd(' '.join(tokens[j:j+i]))
if(nstd.correct!=''):
tokens[j] = nstd.correct
k = j+1
while(k<j+i):
tokens[k] = ''
k += 1
j += 1
i -= 1
temp = " ".join(tokens).split()
if(len(temp)>0 and temp[0].isdigit()):
temp = addOrdinal(temp)
return temp
'''
TYPE HANDLERS
'''
def handleSt(tokens):
i=0
while(i<len(tokens)-1):
if(tokens[i]== 'ST' and isSaint(tokens[i+1])):
tokens[i] = 'SAINT'
elif(tokens[i]== 'ST' and tokens[i+1][len(tokens[i+1])-1] == 'S'):
test = tokens[i+1]
testlen = len(test)
if(isSaint(test[0:testlen-1])):
tokens[i] = 'SAINT'
i +=1
return tokens
def handleApt(tokens):
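    # Looks for a unit designator (apartment, floor, etc.) in the token list.
    # Returns [index, designator-string] where index is the position at which
    # the designator begins, or [-1, ''] when none is found.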
tlen = len(tokens)
i = 0
while(i<tlen-1):
if(isOrdinal(tokens[i]) == True and (tokens[i+1] in aptfloor)):
return [i,tokens[i]+' FL']
addrn = isAddr(tokens[i+1], 2)
apt = isApt(tokens[i])
if(addrn.isaddr == True and apt.correct != ''):
return [i,' '.join(tokens[i:])]
i += 1
if(tlen>2):
addrn = isAddr(tokens[tlen-1], 2)
apt = isApt(tokens[tlen - 2])
#tempDir1 = isDir(tokens[tlen-2],dirLookup)
#tempSuffix1 = isSuffix(tokens[tlen-2],suffixLookup)
if(addrn.isaddr == True and apt.correct != ''):
return [tlen-2,apt.correct+' '+addrn.addrnumstrlow]
elif(addrn.isaddr == True):
return [tlen-1,addrn.addrnumstrlow]
return [-1, '']
def handleMt(tokens):
i = 0
while i < len(tokens) - 1:
if tokens[i] == 'MT':
tokens[i] = 'MOUNT'
i += 1
return tokens
def handleDirSpaces(tokens):
return tokens
def isOrdinal(token):
tlen= len(token)
if(tlen>2):
test = token[tlen-3:]
if test in ordinal:
return True
return False
def addOrdinal(str):
lastchar = str[0][-1:]
ord=addOrdinalLookup[lastchar]
str[0] = str[0]+ord.orsuffix
return str
def isAddr(test, ver):
#type:
# 0 = standard
# 1 = unit designator (need to allow single Char)
#break between alpha,num,_,-,/
half = False
if len(test) > 2 and test[-3:] == '1/2':
half = True
test = test[:-3]
if test == 'ONE':
test = '1'
tokens = re.findall(r"[^\W\d_-]+|\d+|-|#|/", test)
tokenLen = len(tokens)
if tokenLen > 1 and tokens[-1].isalnum() == False:
tokens.pop()
tokenLen = len(tokens)
addr_ret = addrnum()
#9367-75
#925R-35
#Handle Address Ranges from DOR
if((tokenLen == 3 or tokenLen == 4) and tokens[0].isdigit() and tokens[tokenLen-2] == '-' and len(tokens[tokenLen-1]) <= 2 and tokens[tokenLen-1].isdigit()):
alowst = tokens[0][-2:]
ahighst = tokens[tokenLen-1][-2:]
if(int(alowst) % 2 == 0):
alowoeb = 'E'
else:
alowoeb = 'O'
if(int(ahighst) % 2 == 0):
ahighoeb = 'E'
else:
ahighoeb = 'O'
if(ahighoeb != alowoeb):
            ahighoeb = 'B'
            alowoeb = 'B'
ilow = int(alowst)
ihigh = int(ahighst)
if ilow > ihigh:
            ahighoeb = 'U'
            alowoeb = 'U'
if len(tokens[0]) > 2:
hundred = (int(tokens[0][0:-2]))*100
ilow = hundred+ilow
ihigh = hundred+ihigh
if tokenLen == 3:
addr_ret.addrnumlow =ilow
addr_ret.addrnumhigh = ihigh
addr_ret.addrnumstrlow = str(ilow)
addr_ret.addrnumstrhigh = str(ihigh)
addr_ret.oeb = ahighoeb
addr_ret.isaddr = True
else:
addr_ret.addrnumlow = ilow
addr_ret.addrnumhigh = ihigh
addr_ret.addrnumstrlow = str(ilow)+tokens[1]
addr_ret.addrnumstrhigh = str(ihigh)+tokens[1]
addr_ret.oeb = ahighoeb
addr_ret.isaddr = True
return addr_ret
#2201 1/2-03
#Handle Address Ranges from DOR
if tokenLen == 6 and \
tokens[0].isdigit() and \
tokens[1] == '1' and \
tokens[2] == '/' and \
tokens[3] == '2' and \
tokens[4] == '-' and \
tokens[5].isdigit():
alowst = tokens[0][-2:]
ahighst = tokens[5][-2:]
if int(alowst) % 2 == 0:
alowoeb = 'E'
else:
alowoeb = 'O'
if int(ahighst) % 2 == 0:
ahighoeb = 'E'
else:
ahighoeb = 'O'
if ahighoeb != alowoeb:
            ahighoeb = 'B'
            alowoeb = 'B'
ilow = int(alowst)
ihigh = int(ahighst)
        if(ilow > ihigh):
            ahighoeb = 'U'
            alowoeb = 'U'
if len(tokens[0]) > 2:
hundred = int(tokens[0][:-2]) * 100
ilow = hundred + ilow
ihigh = hundred + ihigh
addr_ret.addrnumlow =ilow
addr_ret.addrnumhigh = ihigh
addr_ret.addrnumstrlow = str(ilow)+ ' 1/2'
addr_ret.addrnumstrhigh = str(ihigh)+ ' 1/2'
addr_ret.oeb = ahighoeb
addr_ret.isaddr = True
return addr_ret
if tokenLen == 1 and tokens[0].isdigit():
if(int(tokens[0]) % 2 == 0):
addr_ret.oeb = 'E'
else:
addr_ret.oeb = 'O'
if(half == True):
tokens.append(' 1/2')
        # An address number of zero: return as a valid address but blank out the number
if(tokens[0] == '0' or tokens[0] == '00'):
addr_ret.addrnumlow =-1
addr_ret.addrnumhigh = -1
addr_ret.oeb = 'U'
addr_ret.addrnumstrlow = ''
addr_ret.addrnumstrhigh = ''
addr_ret.isaddr = True
else:
addr_ret.addrnumlow = int(tokens[0])
addr_ret.addrnumhigh = int(tokens[0])
addr_ret.addrnumstrlow = ''.join(tokens)
addr_ret.addrnumstrhigh = ''.join(tokens)
addr_ret.isaddr = True
return addr_ret
#A
if(ver == 2 and tokenLen == 1 and len(tokens[0]) == 1 and tokens[0].isalpha()):
if(half == True):
tokens.append(' 1/2')
addr_ret.oeb = 'U'
addr_ret.addrnumlow = 0
addr_ret.addrnumhigh = 0
addr_ret.addrnumstrlow = ''.join(tokens)
addr_ret.addrnumstrhigh = ''.join(tokens)
addr_ret.isaddr = True
return addr_ret
#NNAA
if(tokenLen == 2 and tokens[0].isdigit()):
# numeric street
if(tokens[1] == 'ST' or tokens[1] == 'ND' or tokens[1] == 'RD' or tokens[1] == 'TH'):
addr_ret.isaddr = False
else:
if(half == True):
tokens.append(' 1/2')
if(int(tokens[0]) % 2 == 0):
addr_ret.oeb = 'E'
else:
addr_ret.oeb = 'O'
addr_ret.addrnumlow = int(tokens[0])
addr_ret.addrnumhigh = int(tokens[0])
addr_ret.addrnumstrlow = ''.join(tokens)
addr_ret.addrnumstrhigh = ''.join(tokens)
addr_ret.isaddr = True
#AANN
if(tokenLen == 2 and tokens[1].isdigit()):
if(int(tokens[1]) % 2 == 0):
addr_ret.oeb = 'E'
else:
addr_ret.oeb = 'O'
if(half == True):
tokens.append(' 1/2')
addr_ret.addrnumlow = int(tokens[1])
addr_ret.addrnumhigh = int(tokens[1])
addr_ret.addrnumstrlow = ''.join(tokens)
addr_ret.addrnumstrhigh = ''.join(tokens)
addr_ret.isaddr = True
#UU-NN
if(tokenLen > 2 and tokens[tokenLen-2]== '-' and tokens[tokenLen-1].isdigit()):
if(int(tokens[tokenLen-1]) % 2 == 0):
addr_ret.oeb = 'E'
else:
addr_ret.oeb = 'O'
if(half == True):
tokens.append(' 1/2')
addr_ret.addrnumlow = int(tokens[tokenLen-1])
addr_ret.addrnumhigh = int(tokens[tokenLen-1])
addr_ret.addrnumstrlow = ''.join(tokens)
addr_ret.addrnumstrhigh = ''.join(tokens)
addr_ret.isaddr = True
#NN-UU
if(tokenLen > 2 and tokens[tokenLen-2]== '-' and tokens[tokenLen-1].isalpha() and tokens[0].isdigit()):
if(int(tokens[0]) % 2 == 0):
addr_ret.oeb = 'E'
else:
addr_ret.oeb = 'O'
if(half == True):
tokens.append(' 1/2')
addr_ret.addrnumlow = int(tokens[0])
addr_ret.addrnumhigh = int(tokens[0])
addr_ret.addrnumstrlow = ''.join(tokens)
addr_ret.addrnumstrhigh = ''.join(tokens)
addr_ret.isaddr = True
#AANNAA
if(tokenLen == 3 and tokens[1].isdigit()):
if(int(tokens[1]) % 2 == 0):
addr_ret.oeb = 'E'
else:
addr_ret.oeb = 'O'
if(half == True):
tokens.append(' 1/2')
addr_ret.addrnumlow = int(tokens[1])
addr_ret.addrnumhigh = int(tokens[1])
addr_ret.addrnumstrlow = ''.join(tokens)
addr_ret.addrnumstrhigh = ''.join(tokens)
addr_ret.isaddr = True
#NNAANN - this is a bit unusual
if(tokenLen == 3 and tokens[0].isdigit() and tokens[2].isdigit()):
if(int(tokens[2]) % 2 == 0):
addr_ret.oeb = 'E'
else:
addr_ret.oeb = 'O'
if(half == True):
tokens.append(' 1/2')
addr_ret.addrnumlow = int(tokens[2])
addr_ret.addrnumhigh = int(tokens[2])
addr_ret.addrnumstrlow = ''.join(tokens)
addr_ret.addrnumstrhigh = ''.join(tokens)
addr_ret.isaddr = True
#AANNAANN
if(tokenLen == 4 and tokens[1].isdigit() and tokens[3].isdigit()):
if(int(tokens[3]) % 2 == 0):
addr_ret.oeb = 'E'
else:
addr_ret.oeb = 'O'
if(half == True):
tokens.append(' 1/2')
addr_ret.addrnumlow = int(tokens[3])
addr_ret.addrnumhigh = int(tokens[3])
addr_ret.addrnumstrlow = ''.join(tokens)
addr_ret.addrnumstrhigh = ''.join(tokens)
addr_ret.isaddr = True
#NNAANNAA
if(tokenLen == 4 and tokens[0].isdigit() and tokens[2].isdigit()):
if(int(tokens[2]) % 2 == 0):
addr_ret.oeb = 'E'
else:
addr_ret.oeb = 'O'
if(half == True):
tokens.append(' 1/2')
addr_ret.addrnumlow = int(tokens[2])
addr_ret.addrnumhigh = int(tokens[2])
addr_ret.addrnumstrlow = ''.join(tokens)
addr_ret.addrnumstrhigh = ''.join(tokens)
addr_ret.isaddr = True
return addr_ret
def parseAddr(item):
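    # Overall strategy: normalize punctuation, split intersections on
    # conjunctions (AND / AT / & / @ / '/'), peel off the address number and
    # any trailing unit designator, then classify the remaining tokens with
    # the directional and suffix lookups to fill predir, streetname, suffix
    # and postdir.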
address = addr()
tempDir = {}
tempSuffix = {}
# Handle special characters
item = (
item.replace('.', ' ')
# .replace('')
.replace(',',' ')
.upper()
.replace('#', ' ')
.replace('\'', '')
)
item = ' '.join(item.split())
# TODO: this might break something
if item == '':
return
conjunctions = [' {} '.format(x) for x in ['AND', '@', 'AT', '&']]
if any(x in item for x in conjunctions) or ('/' in item and '1/' not in item):
if ' AND ' in item:
tokens = item.split(" AND ")
elif ' AT ' in item:
tokens = item.split(" AT ")
elif ' & ' in item:
tokens = item.split(" & ")
elif ' @ ' in item:
tokens = item.split(" @ ")
elif '/' in item:
tokens = item.split('/')
addr1 = parseAddr(tokens[0])
tokens = item.split()
tokenLen = len(tokens)
addrn = isAddr(tokens[0], 0)
if tokenLen > 1 and addrn.isaddr == True and len(tokens[1]) >= 3 and tokens[1][1] == '/':
addrn = isAddr(tokens[0] + ' ' + tokens[1], 0)
#addrn.addrnumstrlow = addrn.addrnumstrlow+' '+tokens[1]
#addrn.addrnumstrhigh = addrn.addrnumstrhigh+' '+tokens[1]
#address.addr = addrn
tokens = tokens[2:tokenLen]
elif addrn.isaddr == True and tokenLen > 1:
address.addr = addrn
tokens = tokens[1:]
tokenLen = len(tokens)
apt = handleApt(tokens)
address.unitdesignator = ''
if apt[0] != -1:
address.unitdesignator = apt[1]
tokens = tokens[0:apt[0]]
tokenLen = len(tokens)
if tokenLen == 1:
address.streetname = ' '.join(nameStd(tokens))
address.parsetype = 'A1'
return address
# TODO: ?
tokens = handleSt(tokens)
tokens = handleMt(tokens)
if tokenLen == 2:
tempSuffix1 = isSuffix(tokens[-1])
if tempSuffix1.std != '0':
address.addr = addrn
address.predir = ''
address.streetname = ' '.join(nameStd(tokens[:-1]))
address.suffix = tempSuffix1.correct
address.postdir = ''
address.parsetype = '2NS'
return address
tempDir1 = isDir(tokens[0])
if tempDir1.std != '0':
address.addr = addrn
address.predir = tempDir1.correct
address.streetname = ''.join(nameStd(tokens[1:]))
address.suffix = ''
address.postdir = ''
#address.unitdesignator = ''
address.parsetype = '2APN'
return address
        tempDir2 = isDir(tokens[-1])
        if tempDir2.std != '0':
            address.addr = addrn
            address.predir = ''
            address.streetname = ' '.join(nameStd(tokens[:-1]))
            address.suffix = ''
            address.postdir = tempDir2.correct
#address.unitdesignator = ''
address.parsetype = '2ANP'
return address
else:
address.addr = addrn
address.predir = ''
address.streetname = ' '.join(nameStd(tokens))
address.suffix = ''
address.postdir = ''
#address.unitdesignator = ''
address.parsetype = '2ANN'
return address
if tokenLen >= 3 and tokenLen < 7:
tempDir1 = isDir(tokens[0])
tempDir2 = isDir(tokens[tokenLen-1])
tempSuffix1 = isSuffix(tokens[tokenLen-1])
tempSuffix2 = isSuffix(tokens[tokenLen-2])
## Pattern addrnum dir name suffix
if tempDir1.std != '0' and tempSuffix1.std != '0':
address.addr = addrn
address.predir = tempDir1.correct
address.streetname = ' '.join(nameStd(tokens[1:-1]))
address.suffix = tempSuffix1.correct
address.postdir = ''
address.parsetype = 'ADNS'
return address
## Pattern addrnum name suffix dir
if tempDir2.std != '0' and tempSuffix2.std != '0':
address.addr = addrn
address.predir = ''
address.streetname = ' '.join(nameStd(tokens[:-2]))
address.suffix = tempSuffix2.correct
address.postdir = tempDir2.correct
address.parsetype = 'ANSD'
return address
## Pattern addrnum dir name suffix junk
if tempDir1.std == '1' and tempSuffix2.std == '1' and tempSuffix1.std != '0':
address.addr = addrn
address.predir = tempDir1.correct
address.streetname = ' '.join(nameStd(tokens[1:-2]))
address.suffix = tempSuffix2.correct
address.postdir = ''
address.parsetype = 'ADNSx'
return address
## Name and Suffix
if tempDir1.std == '0' and tempDir2.std == '0' and tempSuffix1.std != '0':
address.addr = addrn
address.predir = ''
address.streetname = ' '.join(nameStd(tokens[:-1]))
address.suffix = tempSuffix1.correct
address.postdir = ''
address.parsetype = 'ANS'
return address
#There's junk
if tokenLen >= 4:
tempDir1 = isDir(tokens[0])
tempDir2 = isDir(tokens[3])
tempSuffix1 = isSuffix(tokens[2])
tempSuffix2 = isSuffix(tokens[3])
# predir name suffix junk
if tempDir1.std != '0' and tempSuffix1.std == '1':
address.addr = addrn
address.predir = tempDir1.correct
address.streetname = ' '.join(nameStd(tokens[1:2]))
address.suffix = tempSuffix1.correct
address.postdir = ''
address.parsetype = '4ADNSx'
return address
# predir name name suffix
if tempDir1.std != '0' and tempSuffix2.std != '0':
address.addr = addrn
address.predir = tempDir1.correct
address.streetname = ' '.join(nameStd(tokens[1:3]))
address.suffix = tempSuffix2.correct
address.postdir = ''
address.parsetype = '4APNNSx'
return address
if tokenLen == 3:
tempDir1 = isDir(tokens[0])
tempDir2 = isDir(tokens[2])
tempSuffix1 = isSuffix(tokens[1])
tempSuffix2 = isSuffix(tokens[2])
        # name name name (no dir or suffix)
if tempDir1.std == '0' and tempSuffix1.std == '0' and tempDir2.std == '0' and tempSuffix2.std == '0':
address.addr = addrn
address.predir = ''
address.streetname = ' '.join(nameStd(tokens[0:3]))
address.suffix = ''
address.postdir = ''
address.parsetype = '3NNN'
return address
if tempDir1.std != '0' and tempSuffix1.std == '0' and tempDir2.std == '0' and tempSuffix2.std == '0':
address.addr = addrn
address.predir = tempDir1.correct
address.streetname = ' '.join(nameStd(tokens[1:3]))
address.suffix = ''
address.postdir = ''
address.parsetype = '3DNN'
return address
address.parsetype = 'TODO'
address.streetname = ' '.join(nameStd(tokens))
return address
'''
RUN
'''
cwd = os.path.dirname(__file__)
# Get config
# config_path = os.path.join(cwd, 'config.py')
# return_dict = True if CONFIG['return_dict'] else False
ordinal = ['1ST','2ND','3RD','4TH','5TH','6TH','7TH','8TH','9TH','0TH']
aptfloor = ['FL','FLR','FLOOR']
header = 'parse,input,oeb,alow,ahigh,astrlow,astrhigh,predir,streetname,suffix,postdir,unit,predir2,streetname2,suffix2,postdir2,unit2\n'
suffixLookup = createSuffixLookup()
dirLookup = createDirLookup()
saintLookup = createSaintLookup()
nameStdLookup = createNameStdLookup()
aptLookup = createAptLookup()
apteLookup = createApteLookup()
addOrdinalLookup = createOrdinalLookup()
class Parser():
def __init__(self, return_dict=False):
self.return_dict = return_dict
def parse(self, addr_str):
parsed = parseAddr(addr_str)
if self.return_dict:
# Hack to make nested addrnum a dict as well
parsed.addr = parsed.addr.__dict__
return parsed.__dict__
return parsed
# TEST
# if __name__ == '__main__':
# parser = Parser(return_dict=True)
# parsed = parser.parse('1718 N. Bailey Street')
# print(parsed) | mit | 6,505,887,107,445,061,000 | 26.124197 | 161 | 0.512888 | false |
bootchk/pensool | source/gui/manager/handle.py | 1 | 1810 | '''
Handle manager: ensure only one handle set is active.
HandleSet's are not in the model.
An active HandleSet is in the scheme and is drawn.
Generally Handles draw on top of the morphs they handle.
However, it may be drawn invisibly !!!
E.G. points may be hidden by lines they handle.
E.G. style of handles may be invisible.
A HandleSet is drawn and picked in the transform of its parent morph.
A HandleSet is not a child of parent morph !!!
'''
'''
Copyright 2010, 2011 Lloyd Konneker
This file is part of Pensool.
Pensool is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
'''
import config
from decorators import *
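# Typical calling sequence (an illustrative sketch; the event/point names
# below are not part of this module):
#   rouse(handle_set, morph, True)   # activate handles when a morph gains focus
#   handle = pick(event_point)       # hit-test handles in the morph's transform
#   draw()                           # draw the active handle set each frame
#   rouse(handle_set, morph, False)  # dismiss handles when focus leaves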
current_handle_set = None
current_morph = None
@dump_event
def rouse(handle_set, morph, direction):
  ''' Rouse (direction=True) or dismiss (direction=False) the handle set shown on a morph. '''
global current_handle_set, current_morph
if direction:
current_handle_set = handle_set
current_morph = morph
# FIXME put in scheme or append to morph and later remove
else:
current_handle_set = None
current_morph = None
#@dump_return
def pick(point):
''' Pick any handle of the current handle set. '''
picked = None
if current_handle_set:
context = config.viewport.user_context()
context.set_matrix(current_morph.retained_transform)
picked = current_handle_set.pick(context, point)
if picked:
picked.highlight(True)
# TODO unhighlight at appropriate time
return picked
def draw():
''' Draw current handle set. '''
if current_handle_set:
context = config.viewport.user_context()
context.set_matrix(current_morph.retained_transform)
return current_handle_set.draw(context)
| gpl-3.0 | -6,679,037,786,650,404,000 | 26.424242 | 69 | 0.725967 | false |
CityManager/start_django | start_django/urls.py | 1 | 1098 | """start_django URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url, include
from django.contrib import admin
# from polls import urls as polls_urls
urlpatterns = [
url(r'^admin/', admin.site.urls),
    url(r'^polls/', include('polls.urls')),  # register the polls app's URL patterns with the start_django project
    # url(r'^polls/', include(polls_urls)),  # either form works; this one requires importing the target urls module above
]
| apache-2.0 | -5,415,484,142,663,766,000 | 35.928571 | 81 | 0.702128 | false |
iansealy/projecteuler | 84.py | 1 | 2842 | #!/usr/bin/env python
"""This script solves the Project Euler problem "Monopoly odds". The problem
is: If, instead of using two 6-sided dice, two 4-sided dice are used, find the
six-digit modal string.
"""
import argparse
import random
def main(args):
"""Monopoly odds"""
# Constants
SQUARES = 40
CC_CARDS = 16
CH_CARDS = 16
GO = 0
# A1 = 1
CC1 = 2
# A2 = 3
# T1 = 4
R1 = 5
# B1 = 6
CH1 = 7
# B2 = 8
# B3 = 9
JAIL = 10
C1 = 11
U1 = 12
# C2 = 13
# C3 = 14
R2 = 15
# D1 = 16
CC2 = 17
# D2 = 18
# D3 = 19
# FP = 20
# E1 = 21
CH2 = 22
# E2 = 23
E3 = 24
R3 = 25
# F1 = 26
# F2 = 27
# U2 = 28
# F3 = 29
G2J = 30
# G1 = 31
# G2 = 32
CC3 = 33
# G3 = 34
# R4 = 35
CH3 = 36
# H1 = 37
# T2 = 38
H2 = 39
count = [0] * SQUARES
current = GO
double_run = 0
for _ in range(int(1e7)):
die1 = random.randrange(args.sides) + 1
die2 = random.randrange(args.sides) + 1
if die1 == die2:
double_run += 1
else:
double_run = 0
current = (current + die1 + die2) % SQUARES
if double_run == 3:
current = JAIL
double_run = 0
if current == G2J:
current = JAIL
if current in (CC1, CC2, CC3):
cc = random.randrange(CC_CARDS) + 1
if cc == 1:
current = GO
elif cc == 2:
current = JAIL
if current in (CH1, CH2, CH3):
ch = random.randrange(CH_CARDS) + 1
if ch == 1:
current = GO
elif ch == 2:
current = JAIL
elif ch == 3:
current = C1
elif ch == 4:
current = E3
elif ch == 5:
current = H2
elif ch == 6:
current = R1
elif (ch == 7 or ch == 8) and current == CH1:
current = R2
elif (ch == 7 or ch == 8) and current == CH2:
current = R3
elif (ch == 7 or ch == 8) and current == CH3:
current = R1
elif ch == 9 and current in (CH1, CH3):
current = U1
elif ch == 10:
current = (current - 3) % SQUARES
count[current] += 1
top = list(range(SQUARES))
top.sort(key=lambda i: count[i], reverse=True)
top = top[0:3]
print('{0[0]:02d}{0[1]:02d}{0[2]:02d}'.format(top))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Monopoly odds')
parser.add_argument(
'sides', metavar='SIDES', type=int, default=4, nargs='?',
help='Number of die sides')
args = parser.parse_args()
main(args)
| gpl-3.0 | 2,292,069,235,790,237,200 | 21.377953 | 78 | 0.446868 | false |
rytis/miniREST | miniREST/client.py | 1 | 2807 | import httplib
import base64
import string
class RESTResource(object):
def __init__(self):
self.status = None
self.reason = None
self.raw_data = None
class RESTClient(object):
"""
    Simple interface to REST web services. Supports 'GET', 'PUT', 'POST' and 'DELETE' methods.
    Tailored towards JSON-based services, although it should be straightforward to implement
    different data payload methods:
- subclass from RESTClient
- implement _build_<data type>_payload method (see json example)
- pass data to get, put, etc method as 'data_<data type>' keyword argument
Examples:
c = RESTClient('api.example.com')
c.get('/api/v1/resource/')
c.put('/api/v1/resource/instance1/', data_json={'params': ['res1a', 'res1b']})
c.post('/api/v1/resource/', data_json={'name': 'instance2', 'params': ['res2a', 'res2b']})
c.delete('/api/v1/resource/instance1/')
c = RESTClient('https://secret-api.example.com', username='user1', password='secret1')
"""
def __init__(self, url, username=None, password=None):
self._method = None
self._url = url
if self._url.endswith('/'):
self._url = self._url[:-1]
self.headers = {'Content-Type': 'application/json',
'Accept': 'application/json'}
if username and password:
auth_string = 'Basic ' + string.strip(base64.encodestring(username + ':' + password))
self.headers['Authorization'] = auth_string
def _build_json_payload(self, data):
try:
import json
except ImportError:
raise RuntimeError('json not installed')
return json.dumps(data)
def _rest_call(self, resource=None, **kwargs):
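        # Any keyword argument named data_<type> is turned into the request
        # body by the matching _build_<type>_payload method (e.g. data_json
        # goes through _build_json_payload).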
http_body = None
if kwargs:
for key in kwargs:
if key.startswith('data_'):
http_body = getattr(self, "_build_%s_payload" % key[5:])(kwargs[key])
if self._url.startswith('https://'):
c = httplib.HTTPSConnection(self._url[8:])
elif self._url.startswith('http://'):
c = httplib.HTTPConnection(self._url[7:])
else:
c = httplib.HTTPConnection(self._url)
c.request(self._method.upper(), resource, body=http_body, headers=self.headers)
resp = c.getresponse()
rest_obj = RESTResource()
rest_obj.status = resp.status
rest_obj.reason = resp.reason
rest_obj.raw_data = resp.read()
c.close()
return rest_obj
def __getattr__(self, item):
if item not in ('get', 'put', 'post', 'delete'):
raise AttributeError("Method '%s' not implemented" % item)
self._method = item
return self._rest_call
| apache-2.0 | -6,613,027,225,643,095,000 | 36.426667 | 98 | 0.582829 | false |
asobolev/nix4nest | nix4nest/nix/weightstack.py | 1 | 2862 | from __future__ import absolute_import
from .nixbase import NixBase
import numpy as np
class WeightStack(NixBase):
def __init__(self, nix_object):
super(WeightStack, self).__init__(nix_object)
self._sources = []
self._targets = []
@property
def data(self):
return self._nix_object.data
@property
def data_type(self):
return self._nix_object.data_type
@property
def dimensions(self):
return self._nix_object.dimensions
def append_snapshot(self, snapshot, time):
self.data.append(snapshot)
# extend time dimension
if len(self._nix_object.dimensions) == 0:
self.build_dimensions([time], self._sources, self._targets)
else:
dim = self._nix_object.dimensions[0]
dim.ticks = np.array(list(dim.ticks) + [time])
def build_dimensions(self, times, sources, targets):
"""
Builds dimensions according to the given values.
:param times: ticks for time domain (in 'ms')
:param sources: list of NEST IDs of sources
:param targets: list of NEST IDs of targets
:return:
"""
for dim in (times, sources, targets):
assert(len(dim) > 0)
nix_array = self._nix_object
nix_array.append_range_dimension(times)
nix_array.append_range_dimension(sorted(sources))
nix_array.append_range_dimension(sorted(targets))
nix_array.dimensions[0].unit = 'ms'
nix_array.dimensions[0].label = 'time'
nix_array.dimensions[1].label = 'source'
nix_array.dimensions[2].label = 'target'
@staticmethod
def create_weight_stack(where, name, weights, sources, targets, times=()):
"""
        Creates a stack of connection weights evolving over time.
        :param where: block where to create the stack (nix::Block)
        :param name: name of the weight stack (str)
:param weights: 3D array with connection weights. Dimensions:
- time (1)
- source (2)
- target (3)
:return: instance of WeightStack
"""
assert(hasattr(weights, 'dtype'))
assert(len(weights.shape) == 3)
for dim in (sources, targets):
assert(len(dim) > 0)
params = name, 'weightstack', weights.dtype, weights.shape
ws = where.create_data_array(*params)
ws.data.append(weights)
weightstack = WeightStack(ws)
if len(times) > 0:
            weightstack.build_dimensions(times, sources, targets)
        else:  # need to temporarily store these because 'time' should be first
weightstack._sources = sources
weightstack._targets = targets
return weightstack | lgpl-3.0 | 6,535,918,130,691,322,000 | 31.168539 | 78 | 0.579315 | false |
pombreda/ximenez | tests/test_shared_ssh.py | 1 | 1923 | """Tests for ``ximenez.shared.ssh`` module.
$Id$
"""
from base import XimenezTestCase
from ximenez.shared import ssh
from ximenez.shared import ConnectionException
class SSHTestCase(XimenezTestCase):
"""A test case for ``ximenez.shared.ssh`` module.
    Some of these tests presume that there is an SSH server listening
on port 22 of the local host.
"""
def test_escapeShellCommand(self):
commands = ('ls',
'cd /var ; ls',
'cd && ls',
'cd || ls',
'ls &',
'ls | grep foo',
'echo Foo!',
'echo !!',
'ls > foo',
'ls < foo',
'ls ~',
'ls *',
'ls {foo,bar}.txt',
'ls [fb]oo',
'ls ?oo',
'(ls)',
'echo US$380',
'echo 1\\2',
'echo `foo`')
from popen2 import popen3
for command in commands:
escaped = ssh.escapeShellCommand(command)
stdout, _, _ = popen3('echo %s' % escaped)
self.failUnlessEqual(stdout.read().strip(), command)
def test_executeShellCommand(self):
host = ssh.SSHRemoteHost('localhost')
self.failUnlessEqual(host.execute('echo "foo"'), 'foo')
self.failUnlessEqual(host.execute('ls /doesnotexist'),
'ls: /doesnotexist: No such file or directory')
def test_cannot_connect(self):
host = ssh.SSHRemoteHost('nonexistent', 222)
self.failUnlessRaises(ConnectionException, host.execute, 'ls')
def test_suite():
import unittest
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SSHTestCase))
return suite
if __name__ == '__main__':
import unittest
unittest.main()
| gpl-3.0 | -654,123,153,785,586,700 | 28.584615 | 76 | 0.50494 | false |
peace098beat/pyside_cookbook | 02_waveviewer/player.py | 1 | 2266 | import sys
import time
import threading
import numpy as np
import pyaudio
class Player(object):
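    # Streams a float32 numpy buffer to the sound card via PyAudio on a
    # background thread. The buffer holds two halves: once playback passes the
    # midpoint, _swap_buffers() shifts the second half into the first and
    # refills the second half (with the old first half when live=True,
    # otherwise with silence).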
def __init__(self, buffer, chunk_size=None, rate=None, live=None):
self.rate = rate
        self.buffer_size = buffer.size // 2
assert chunk_size < self.buffer_size
assert buffer.dtype == np.float32
self.buffer = buffer
self.chunk_size = chunk_size
self.live = live
self.paused = False
def _swap_buffers(self):
if self.live:
b0 = self.buffer[:self.buffer_size]
else:
b0 = np.zeros(self.buffer_size, dtype=np.float32)
self.buffer[:self.buffer_size], self.buffer[self.buffer_size:] = self.buffer[self.buffer_size:], b0
def _play_chunk(self):
chunk = self.buffer[self.i:self.i + self.chunk_size]
self.stream.write(chunk.tostring())
self.i += self.chunk_size
if self.i >= self.buffer_size:
self.i -= self.buffer_size
self._swap_buffers()
def _play(self):
# Open the stream on the background thread.
self.p = pyaudio.PyAudio()
self.stream = self.p.open(format=pyaudio.paFloat32, channels=1, rate=self.rate, output=1)
if self.paused:
self.paused = False
while not self.paused:
self._play_chunk()
def play(self):
if not hasattr(self, '_thread'):
self.i = 0
self._thread = threading.Thread(target=self._play)
self._thread.daemon = True
self._thread.start()
def pause(self):
self.paused = True
time.sleep(2 * float(self.chunk_size) / self.rate)
self.stream.close()
self._thread.join()
del self._thread
def get_nowframe(self):
return self.i
def get_audio(self):
return self.buffer
if __name__ == '__main__':
import numpy as np
rate = 44100
buffer_duration = 1.
buffer_size = int(buffer_duration * rate)
chunk_size = 1024
buffer = np.zeros(2 * buffer_size, dtype=np.float32)
t = np.linspace(0., 2 * buffer_duration, 2 * buffer_size)
f0 = 440.
x = np.sin(2 * np.pi * f0 * t) * .1
buffer[:] = x
p = Player(buffer, chunk_size=chunk_size, rate=rate, live=True)
p.play()
p.pause()
| gpl-3.0 | -2,657,807,977,204,051,500 | 29.213333 | 107 | 0.579435 | false |
snowflakedb/snowflake-sqlalchemy | __init__.py | 1 | 1780 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2019 Snowflake Computing Inc. All right reserved.
#
from sqlalchemy.types import (
BIGINT,
BINARY,
BOOLEAN,
CHAR,
DATE,
DATETIME,
DECIMAL,
FLOAT,
INT,
INTEGER,
REAL,
SMALLINT,
TIME,
TIMESTAMP,
VARCHAR,
)
from . import base, snowdialect
from .custom_commands import (
AWSBucket,
AzureContainer,
CopyFormatter,
CopyIntoStorage,
CreateFileFormat,
CreateStage,
CSVFormatter,
ExternalStage,
JSONFormatter,
MergeInto,
PARQUETFormatter,
)
from .custom_types import (
ARRAY,
BYTEINT,
CHARACTER,
DEC,
DOUBLE,
FIXED,
NUMBER,
OBJECT,
STRING,
TEXT,
TIMESTAMP_LTZ,
TIMESTAMP_NTZ,
TIMESTAMP_TZ,
TINYINT,
VARBINARY,
VARIANT,
)
from .util import _url as URL
from .version import VERSION
SNOWFLAKE_CONNECTOR_VERSION = '.'.join(str(v) for v in VERSION[0:3])
base.dialect = dialect = snowdialect.dialect
__version__ = SNOWFLAKE_CONNECTOR_VERSION
__all__ = (
'BIGINT',
'BINARY',
'BOOLEAN',
'CHAR',
'DATE',
'DATETIME',
'DECIMAL',
'FLOAT',
'INT',
'INTEGER',
'REAL',
'SMALLINT',
'TIME',
'TIMESTAMP',
'URL',
'VARCHAR',
'ARRAY',
'BYTEINT',
'CHARACTER',
'DEC',
'DOUBLE',
'FIXED',
'OBJECT',
'NUMBER',
'STRING',
'TEXT',
'TIMESTAMP_LTZ',
'TIMESTAMP_TZ',
'TIMESTAMP_NTZ',
'TINYINT',
'VARBINARY',
'VARIANT',
'MergeInto',
'CSVFormatter',
'JSONFormatter',
'PARQUETFormatter',
'CopyFormatter',
'CopyIntoStorage',
'AWSBucket',
'AzureContainer',
'ExternalStage',
'CreateStage',
'CreateFileFormat',
)
| apache-2.0 | 660,336,489,725,363,600 | 15.036036 | 70 | 0.580337 | false |
thomasdeniau/pyfauxfur | MorphogenesisImageData.py | 1 | 6397 | #!/usr/bin/env python
# encoding: utf-8
"""
Texture.py
Created by Olivier Le Floch on 2009-03-17.
Program written by Thomas Deniau and Olivier Le Floch.
Copyright (c) 2009. All rights reserved.
Portions of this code have been adapted from pygarrayimage :
http://pypi.python.org/pypi/pygarrayimage/0.0.5
http://code.astraw.com/projects/motmot/trac/browser/trunk/pygarrayimage
Please see the LICENSE file for this software and pygarrayimage's software
license.
"""
import unittest
import ctypes
from numpy import dstack, random, zeros
from scipy import weave
from time import time
from OpenGL.GL import *
from OpenGL.GLU import *
class MorphogenesisImageData:
def __init__(self, width, height, D_s, D_a, D_b, beta_i):
'''Initialize morphogenesis image data with specific calculation parameters
:Parameters:
`width` : int
Width in pixels of the calculated image
`height` : int
Height in pixels of the calculated image
`D_s` : float
`D_a` : float
`D_b` : float
`beta_i` : float
'''
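    # Interpretation of the parameters (inferred from the update in step(),
    # not documented upstream): D_s scales the reaction term, D_a and D_b are
    # the diffusion rates of the two morphogens, and beta_i acts as a
    # decay/consumption constant for morphogen B.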
    print 'Generating texture with the following parameters:'
print '-r -s', D_s, '-a', D_a, '-b', D_b, '-d', beta_i, '-x', width, '-y', height
print ''
self.width = width
self.height = height
self.generate('stripe')
self.texture_id = glGenTextures(1) # Generate 1 texture name
glBindTexture(GL_TEXTURE_2D, self.texture_id)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
blank = (GLubyte * (width * height * 4))()
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0,
GL_RGBA, GL_UNSIGNED_BYTE, blank)
glFlush()
self.texture_row_length = 3*width
if self.texture_row_length & 0x1:
self.alignment = 1
elif self.texture_row_length & 0x2:
self.alignment = 2
else:
self.alignment = 4
self.D_s = D_s
self.D_a = D_a
self.D_b = D_b
self.beta_i = beta_i
self.iteration = 0
self.fps = 0
self.last_time = 1
def generate(self, generator):
self.grid_a = 8 * random.rand(self.width, self.height)
self.grid_b = 8 * random.rand(self.width, self.height)
if generator == 'stripe':
self.grid_a = self.grid_a / 8
self.grid_a[self.width / 2, :] = 8
def make_texture(self):
'''
Calculates the colors for each point in the grid, and then copies this
data into the image.
'''
z = zeros((self.width, self.height), 'd')
min = self.grid_a.min()
max = self.grid_a.max()
g = (self.grid_a - min) / (max - min)
self.grid = (255 * dstack((g, g, z))).astype('u1')
def dirty(self):
'''
Force an update of the texture data.
'''
glPushClientAttrib(GL_CLIENT_PIXEL_STORE_BIT)
glPixelStorei(GL_UNPACK_ALIGNMENT, self.alignment)
glPixelStorei(GL_UNPACK_ROW_LENGTH, self.width)
glTexSubImage2Dub(GL_TEXTURE_2D, 0, 0, 0, GL_RGB, self.grid)
glPopClientAttrib()
def step(self):
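    # One iteration of the two-morphogen reaction-diffusion update
    # (Turing-style morphogenesis), compiled as inline C via scipy.weave.
    # The grid wraps like a torus and values are clamped to [0, 8].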
D_s = self.D_s
D_a = self.D_a
D_b = self.D_b
beta_i = self.beta_i
height = self.height
width = self.width
A_o = self.grid_a
A_n = zeros((width, height), 'd')
B_o = self.grid_b
B_n = zeros((width, height), 'd')
self.iteration += 1
t = time()
weave.inline(
'''
#line 119 "MorphogenesisImageData.py"
int i, j, iplus1, jplus1, iminus1, jminus1;
double A_ij, B_ij;
for (i = 0; i < width; i++) {
// Treat the surface as a torus by wrapping at the edges
iplus1 = i < width - 1 ? i + 1 : 0;
iminus1 = i > 0 ? i - 1 : width - 1;
for (j = 0; j < height; j++) {
jplus1 = j < height - 1 ? j + 1 : 0;
jminus1 = j > 0 ? j - 1 : height - 1;
A_ij = A_o(i, j); B_ij = B_o(i, j);
// Component A
A_n(i, j) = A_ij
// Reaction component
+ D_s * (16.0 - A_ij * B_ij)
// Diffusion component
+ D_a * (A_o(iplus1, j) - 2.0 * A_ij + A_o(iminus1, j) + A_o(i, jplus1) - 2.0 * A_ij + A_o(i, jminus1));
A_ij = A_n(i, j);
if (A_ij < 0.0) {
A_n(i, j) = 0.0;
} else if (A_ij > 8.0) {
A_n(i, j) = 8.0;
}
// Component B
B_n(i, j) = B_ij
// Reaction component
+ D_s * (A_ij * B_ij - B_ij - beta_i)
// Diffusion component
+ D_b * (B_o(iplus1, j) - 2.0 * B_ij + B_o(iminus1, j) + B_o(i, jplus1) - 2.0 * B_ij + B_o(i, jminus1));
B_ij = B_n(i, j);
if (B_ij < 0.0) {
B_n(i, j) = 0.0;
} else if (B_ij > 8.0) {
B_n(i, j) = 8.0;
}
}
}
''',
['D_s', 'D_a', 'D_b', 'beta_i', 'height', 'width', 'A_o', 'A_n', 'B_o', 'B_n'],
type_converters=weave.converters.blitz)
self.grid_a = A_n
self.grid_b = B_n
self.last_time = time() - t
self.fps = self.fps * 29. / 30. + 1. / (self.last_time * 30.)
def verboseStep(self):
print 'Start iteration', self.iteration
self.step()
print 'mean(A) =', self.grid_a.mean(), 'mean(B) =', self.grid_b.mean()
print 'Time : %fs'%self.last_time
def logDebugInfo(self):
print "Min A : %f, Min B : %f" %(self.grid_a.min(), self.grid_b.min())
print "Mean A : %f, Mean B : %f" %(self.grid_a.mean(), self.grid_b.mean())
print "Max A : %f, Max B : %f" %(self.grid_a.max(), self.grid_b.max())
def imageName(self):
return 'D_s=%s-D_a=%s-D_b=%s-beta_i=%s-iter=%d'%(
str(self.D_s), str(self.D_a), str(self.D_b), str(self.beta_i), self.iteration)
def __repr__(self):
return str((self.grid_a, self.grid_b))
class MorphogenesisImageDataTests(unittest.TestCase):
def setUp(self):
self.texture = MorphogenesisImageData(400, 400, 0.04, 0.25, 0.0625, 12)
def testImageName(self):
    self.assertEqual(self.texture.imageName(), 'D_s=0.04-D_a=0.25-D_b=0.0625-beta_i=12-iter=0')
def testStep(self):
self.texture.verboseStep()
self.texture.verboseStep()
self.texture.verboseStep()
self.texture.verboseStep()
self.texture.verboseStep()
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 8,239,596,433,584,164,000 | 27.057018 | 116 | 0.540097 | false |
miquelo/caviar | testsuite/unit/caviar/domain/node.py | 1 | 1309 | #
# This file is part of CAVIAR.
#
# CAVIAR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CAVIAR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CAVIAR. If not, see <http://www.gnu.org/licenses/>.
#
import unittest
import caviar
import caviar.domain
import caviar.domain.node
from unittest.mock import patch
SOME_NODE_NAME = "node-01"
class NodeTestCase(unittest.TestCase):
def setUp(self):
managed_domain_context_patcher = patch(
"caviar.domain.ManagedDomainContext"
)
ManagedDomainContext = managed_domain_context_patcher.start()
self.addCleanup(managed_domain_context_patcher.stop)
managed_domain_context = ManagedDomainContext()
self.node = caviar.domain.node.Node(
managed_domain_context,
SOME_NODE_NAME
)
def test_name(self):
node_name = self.node.name
self.assertEqual(node_name, SOME_NODE_NAME)
| gpl-3.0 | -3,383,967,835,002,881,000 | 25.18 | 70 | 0.748663 | false |
Storj/downstream-farmer | tests/test_unit.py | 1 | 48524 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import unittest
import base58
import binascii
from argparse import Namespace, HelpFormatter
import json
import threading
from six.moves.urllib.error import URLError
import mock
from datetime import datetime, timedelta
from downstream_farmer import utils, shell
from downstream_farmer.utils import save, restore
from downstream_farmer.utils import (ManagedThread,
ThreadManager,
ShellApplication)
from downstream_farmer.utils import WorkChunk, LoadTracker
from downstream_farmer.farmer import Farmer
from downstream_farmer.client import DownstreamClient
from downstream_farmer.contract import DownstreamContract
from downstream_farmer.exc import DownstreamError
from heartbeat import Heartbeat
class TestUtilFunctions(unittest.TestCase):
def test_urlify(self):
test_str = "arbitrary strings 'n shit"
result = utils.urlify(test_str)
self.assertEqual('arbitrary%20strings%20%27n%20shit', result)
def test_handle_json_response(self):
m = mock.MagicMock()
m.status_code = 400
m.json.return_value = dict(message='test error')
with self.assertRaises(DownstreamError) as ex:
utils.handle_json_response(m)
self.assertEqual(str(ex.exception), 'test error')
m.json.side_effect = Exception('json processing error')
m.raise_for_status.side_effect = Exception('http error')
with self.assertRaises(Exception) as ex:
utils.handle_json_response(m)
self.assertEqual(str(ex.exception), 'http error')
m = mock.MagicMock()
m.json.return_value = dict(key='value')
result = utils.handle_json_response(m)
self.assertEqual(m.json.return_value, result)
def test_resource_path_meipass(self):
test_dir = 'testdir'
test_file = 'testfile'
setattr(sys, '_MEIPASS', test_dir)
self.assertEqual(
utils.resource_path(test_file), os.path.join(test_dir, test_file))
delattr(sys, '_MEIPASS')
def test_resource_path_default(self):
test_file = 'testfile'
default_path = os.path.join(
os.path.join(os.path.dirname(utils.__file__), 'data'),
test_file)
self.assertEqual(utils.resource_path(test_file), default_path)
def test_save_restore_parity(self):
d = {'key': 'value'}
path = 'test_file'
save(path, d)
r = restore(path)
self.assertEqual(d, r)
os.remove(path)
def test_restore_path_doesnt_exist(self):
path = 'nonexistentpath'
state = restore(path)
self.assertEqual(state, dict())
def test_save_directory_creation(self):
d = {'key': 'value'}
dir = 'testdir'
path = os.path.join(dir, 'file')
save(path, d)
self.assertTrue(os.path.isdir(dir))
self.assertTrue(os.path.exists(path))
r = restore(path)
self.assertEqual(d, r)
os.remove(path)
os.rmdir(dir)
def test_restore_parse_fail(self):
path = 'test_file'
with open(path, 'w') as f:
f.write('test contents')
with mock.patch('json.loads') as l:
l.side_effect = Exception('test error')
with self.assertRaises(DownstreamError) as ex:
restore(path)
self.assertEqual(
str(ex.exception), 'Couldn\'t parse \'{0}\': test error'
.format(path))
os.remove(path)
class TestManagedThread(unittest.TestCase):
def setUp(self):
def mock_target():
threading.current_thread().wait()
self.thread = ManagedThread(target=mock_target)
def tearDown(self):
pass
def test_init(self):
self.assertTrue(self.thread.daemon)
self.assertFalse(self.thread.attached_event.is_set())
def test_wake(self):
self.thread.start()
self.assertTrue(self.thread.is_alive())
self.thread.wake()
self.assertTrue(self.thread.attached_event.is_set())
self.thread.join()
self.assertFalse(self.thread.is_alive())
class TestThreadManager(unittest.TestCase):
def setUp(self):
self.thread_manager = ThreadManager()
def tearDown(self):
self.thread_manager.finish()
def test_init(self):
self.assertTrue(self.thread_manager.running)
self.thread_manager.finish()
self.assertFalse(self.thread_manager.running)
def test_start_thread(self):
def mock_target(manager):
manager.sleep(10)
thread = self.thread_manager.create_thread(
target=mock_target, args=(self.thread_manager,))
thread.start()
self.assertTrue(thread.is_alive())
self.thread_manager.finish()
self.assertFalse(thread.is_alive())
def test_wait_for_shutdown(self):
class MockTargetGenerator(object):
def __init__(self, manager):
self.manager = manager
def __call__(self, arg=None):
self.manager.signal_shutdown()
raise InterruptedError('test error')
with mock.patch('downstream_farmer.utils.time.sleep') as s:
s.side_effect = MockTargetGenerator(self.thread_manager)
self.thread_manager.wait_for_shutdown()
self.assertFalse(self.thread_manager.running)
def test_child_exception(self):
def mock_target():
raise Exception('test error')
thread = self.thread_manager.create_thread(
target=mock_target)
thread.start()
self.thread_manager.wait_for_shutdown()
self.assertFalse(self.thread_manager.running)
class TestShellApplication(unittest.TestCase):
def setUp(self):
self.shell_app = ShellApplication()
def test_init(self):
self.assertTrue(self.shell_app.running)
def test_signal_handler(self):
self.shell_app.signal_handler()
self.assertFalse(self.shell_app.running)
class TestWorkChunk(unittest.TestCase):
def setUp(self):
self.chunk = WorkChunk(0, 2)
def test_elapsed(self):
self.assertEqual(self.chunk.elapsed, 2)
def test_elapsed_from_start_shortened(self):
self.assertEqual(self.chunk.elapsed_from_start(1), 1)
def test_elapsed_from_start_default(self):
self.assertEqual(self.chunk.elapsed_from_start(0), 2)
class TestLoadTracker(unittest.TestCase):
def setUp(self):
self.start_time = 0
self.sample_time = 10
with mock.patch('downstream_farmer.utils.time.clock') as t:
t.return_value = self.start_time
self.tracker = LoadTracker(self.sample_time)
def test_init(self):
self.assertEqual(self.start_time, self.tracker.start)
def test_sample_start_full(self):
now = 20
with mock.patch('downstream_farmer.utils.time.clock') as t:
t.return_value = now
sample_start = self.tracker.sample_start
self.assertEqual(sample_start, now - self.sample_time)
def test_sample_start_partial(self):
now = 5
with mock.patch('downstream_farmer.utils.time.clock') as t:
t.return_value = now
sample_start = self.tracker.sample_start
self.assertEqual(sample_start, self.start_time)
def test_normal_chunks(self):
with mock.patch('downstream_farmer.utils.time.clock') as t:
# add a couple of work chunks
t.return_value = 20
self.tracker.start_work()
t.return_value = 21
self.tracker.finish_work()
t.return_value = 22
self.tracker.start_work()
t.return_value = 23
self.tracker.finish_work()
self.assertEqual(self.tracker.work_time(), 2)
self.assertAlmostEqual(self.tracker.load(), 2. / self.sample_time)
def test_early_chunks(self):
with mock.patch('downstream_farmer.utils.time.clock') as t:
t.return_value = 1.
self.tracker.start_work()
t.return_value = 2.
self.tracker.finish_work()
self.assertEqual(self.tracker.work_time(), 1)
self.assertAlmostEqual(self.tracker.load(), 1. / t.return_value)
def test_finish_before_start(self):
with self.assertRaises(RuntimeError) as ex:
self.tracker.finish_work()
self.assertEqual(str(ex.exception), 'Load tracker work chunk must be '
'started before it can be finished.')
def test_chunk_expiry(self):
with mock.patch('downstream_farmer.utils.time.clock') as t:
t.return_value = 5
self.tracker.start_work()
t.return_value = 10
self.tracker.finish_work()
t.return_value = 15
self.tracker.start_work()
t.return_value = 20
self.tracker.finish_work()
t.return_value = 25
self.tracker.start_work()
t.return_value = 29
self.tracker.finish_work()
self.assertEqual(self.tracker.work_time(), 5)
self.assertAlmostEqual(self.tracker.load(), 0.5)
def test_unfinished_work(self):
with mock.patch('downstream_farmer.utils.time.clock') as t:
t.return_value = 5
self.tracker.start_work()
t.return_value = 10
self.assertEqual(self.tracker.work_time(), 5)
self.assertAlmostEqual(self.tracker.load(), 0.5)
class MockRestore(object):
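    # Stand-in for restore(): returns the canned state registered for a path.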
def __init__(self, table):
self.table = table
def __call__(self, arg):
return self.table[arg]
class MockRaiseOnFirstCall(object):
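    # Callable that raises the given error on its first call only; subsequent
    # calls do nothing.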
def __init__(self, error):
self.called = False
self.error = error
def __call__(self, arg=None):
if (not self.called):
self.called = True
raise self.error
class TestFarmer(unittest.TestCase):
def setUp(self):
self.test_args = mock.MagicMock()
self.test_args.number = None
self.test_args.node_url = 'http://testurl/'
self.test_args.api_path = '/api/downstream/v1'
self.test_args.token = 'testtoken'
self.test_args.address = 'testaddress'
self.test_args.size = 100
self.test_args.history = 'historyfile'
self.test_args.forcenew = False
self.test_args.identity = 'identityfile'
self.test_args.data_directory = os.path.join('data', 'chunks')
self.test_args.log_path = 'testlog'
def tearDown(self):
pass
def test_init_number_invalid(self):
self.test_args.number = -1
with self.assertRaises(DownstreamError) as ex:
Farmer(self.test_args)
self.assertEqual(
str(ex.exception), 'Must specify a positive number of challenges.')
def test_init_size_invalid(self):
self.test_args.size = 0
with self.assertRaises(DownstreamError) as ex:
Farmer(self.test_args)
self.assertEqual(
str(ex.exception), 'Must specify a positive size to farm.')
def test_init_forcenew(self):
self.test_args.forcenew = True
with mock.patch('downstream_farmer.utils.restore', autospec=True) \
as r, mock.patch.object(Farmer, 'check_connectivity'):
r.side_effect = MockRestore(
{'historyfile': dict(), 'identityfile': dict()})
farmer = Farmer(self.test_args)
self.assertIsNone(farmer.token)
def test_init_no_token_no_address(self):
self.test_args.token = None
self.test_args.address = None
with mock.patch('downstream_farmer.utils.restore', autospec=True) \
as r, mock.patch.object(Farmer, 'check_connectivity'),\
self.assertRaises(DownstreamError) as ex:
r.side_effect = MockRestore(
{'historyfile': dict(), 'identityfile': dict()})
Farmer(self.test_args)
self.assertEqual(
str(ex.exception),
'Must specify farming address if one is not available.')
def test_init_url(self):
self.test_args.node_url = 'testurl'
with mock.patch('downstream_farmer.utils.restore', autospec=True) \
as r, mock.patch.object(Farmer, 'check_connectivity'):
r.side_effect = MockRestore(
{'historyfile': dict(), 'identityfile': dict()})
farmer = Farmer(self.test_args)
self.assertEqual(farmer.url, self.test_args.node_url)
self.assertEqual(
farmer.state['last_node'], self.test_args.node_url)
def test_init_url_from_state(self):
self.test_args.node_url = None
with mock.patch('downstream_farmer.farmer.restore', autospec=True) \
as r, mock.patch.object(Farmer, 'check_connectivity'):
r.side_effect = MockRestore(
{'historyfile': {'last_node': 'stateurl'},
'identityfile': dict()})
farmer = Farmer(self.test_args)
self.assertEqual(farmer.url, 'stateurl')
def test_init_url_default(self):
self.test_args.node_url = None
with mock.patch('downstream_farmer.farmer.restore', autospec=True) \
as r, mock.patch.object(Farmer, 'check_connectivity'):
r.side_effect = MockRestore(
{'historyfile': dict(), 'identityfile': dict()})
farmer = Farmer(self.test_args)
self.assertEqual(farmer.url, 'https://live.driveshare.org:8443')
def test_init_token(self):
self.test_args.address = None
with mock.patch('downstream_farmer.farmer.restore', autospec=True) \
as r, mock.patch.object(Farmer, 'check_connectivity'):
r.side_effect = MockRestore(
{'historyfile': dict(), 'identityfile': dict()})
farmer = Farmer(self.test_args)
self.assertEqual(farmer.token, self.test_args.token)
def test_init_token_from_state(self):
self.test_args.token = None
with mock.patch('downstream_farmer.farmer.restore', autospec=True) \
as r, mock.patch.object(Farmer, 'check_connectivity'):
r.side_effect = MockRestore({
'historyfile': {
'nodes': {
self.test_args.node_url.strip('/'): {
'token': 'statetoken',
'address': 'testaddress'
}
}
},
'identityfile': dict()})
farmer = Farmer(self.test_args)
self.assertEqual(farmer.token, 'statetoken')
def test_init_token_default(self):
self.test_args.token = None
with mock.patch('downstream_farmer.farmer.restore', autospec=True) \
as r, mock.patch.object(Farmer, 'check_connectivity'):
r.side_effect = MockRestore(
{'historyfile': dict(), 'identityfile': dict()})
farmer = Farmer(self.test_args)
self.assertEqual(farmer.token, None)
def test_init_address(self):
with mock.patch('downstream_farmer.farmer.restore', autospec=True) \
as r, mock.patch.object(Farmer, 'check_connectivity'):
r.side_effect = MockRestore(
{'historyfile': dict(), 'identityfile': dict()})
farmer = Farmer(self.test_args)
self.assertEqual(farmer.address, self.test_args.address)
def test_init_address_from_state(self):
self.test_args.address = None
with mock.patch('downstream_farmer.farmer.restore', autospec=True) \
as r, mock.patch.object(Farmer, 'check_connectivity'):
r.side_effect = MockRestore({'historyfile': {
'nodes': {
self.test_args.node_url.strip('/'): {
'token': 'statetoken',
'address': 'stateaddress'
}
}
}, 'identityfile': dict()})
farmer = Farmer(self.test_args)
self.assertEqual(farmer.address, 'stateaddress')
def test_init_address_default(self):
self.test_args.address = None
with mock.patch('downstream_farmer.farmer.restore', autospec=True) \
as r, mock.patch.object(Farmer, 'check_connectivity'):
r.side_effect = MockRestore(
{'historyfile': dict(), 'identityfile': dict()})
farmer = Farmer(self.test_args)
self.assertEqual(farmer.address, None)
def test_init_address_from_identities(self):
self.test_args.address = None
with mock.patch('downstream_farmer.farmer.restore', autospec=True) \
as r, mock.patch.object(Farmer, 'check_connectivity'):
r.side_effect = MockRestore({'historyfile': dict(),
'identityfile':
{'19qVgG8C6eXwKMMyvVegsi3xCsKyk3Z3jV':
{'signature': 'HyzVUenXXo4pa+kgm1v'
'S8PNJM83eIXFC5r0q86FGbqFcdla6rcw'
'72/ciXiEPfjli3ENfwWuESHhv6K9esI0'
'dl5I=', 'message':
'test message'}}})
farmer = Farmer(self.test_args)
self.assertEqual(
farmer.address, '19qVgG8C6eXwKMMyvVegsi3xCsKyk3Z3jV')
def test_load_signature_invalid_dict(self):
self.test_args.token = None
self.test_args.address = None
with mock.patch('downstream_farmer.farmer.restore', autospec=True) \
as r, mock.patch.object(Farmer, 'check_connectivity'):
r.side_effect = MockRestore({'historyfile': dict(),
'identityfile':
{'identityaddress': {'invalid':
'dict'}}})
with self.assertRaises(DownstreamError) as ex:
Farmer(self.test_args)
self.assertEqual(str(ex.exception),
'The file format for the identity file '
'{0} should be a JSON formatted dictionary like '
'the following:\n'
' {{\n'
' "your sjcx address": {{\n'
' "message": "your message here",\n'
' "signature": "base64 signature from '
'bitcoin wallet or counterwallet",\n'
' }}\n'
' }}'.format(self.test_args.identity))
def test_load_signature_invalid_sig(self):
self.test_args.token = None
self.test_args.address = None
with mock.patch('downstream_farmer.farmer.restore', autospec=True) \
as r, mock.patch.object(Farmer, 'check_connectivity'),\
mock.patch('siggy.verify_signature') as s:
s.return_value = False
r.side_effect = MockRestore({'historyfile': dict(),
'identityfile':
{'identityaddress':
{'signature': 'testsig', 'message':
'testmessage'}}})
with self.assertRaises(DownstreamError) as ex:
Farmer(self.test_args)
self.assertEqual(str(ex.exception), 'Signature provided does not'
' match address being used. '
'Check your formatting, your SJCX address, and'
' try again.')
def test_load_signature_none(self):
with mock.patch('downstream_farmer.farmer.restore', autospec=True) \
as r, mock.patch.object(Farmer, 'check_connectivity'):
r.side_effect = MockRestore({'historyfile': dict(),
'identityfile':
{'identityaddress':
{'signature': 'testsig', 'message':
'testmessage'}}})
farmer = Farmer(self.test_args)
self.assertEqual(farmer.message, '')
self.assertEqual(farmer.signature, '')
def test_check_connectivity(self):
with mock.patch('downstream_farmer.farmer.restore', autospec=True) \
as r, mock.patch('six.moves.urllib.request.urlopen') as patch:
r.side_effect = MockRestore(
{'historyfile': dict(), 'identityfile': dict()})
farmer = Farmer(self.test_args)
patch.side_effect = URLError('Problem')
with self.assertRaises(DownstreamError):
farmer.check_connectivity()
with mock.patch('six.moves.urllib.request.urlopen') as patch:
farmer.check_connectivity()
self.assertTrue(patch.called)
def test_run(self):
with mock.patch('downstream_farmer.farmer.restore', autospec=True) \
as r, mock.patch('six.moves.urllib.request.urlopen') as patch:
r.side_effect = MockRestore(
{'historyfile': dict(), 'identityfile': dict()})
farmer = Farmer(self.test_args)
with mock.patch('downstream_farmer.farmer.DownstreamClient') as patch,\
mock.patch('downstream_farmer.farmer.save', autospec=True) \
as s, mock.patch.object(Farmer, 'wait_for_shutdown') as w:
patch.return_value.token = 'foo'
patch.return_value.address = 'bar'
farmer.run(True)
patch.assert_called_with(
farmer.url,
farmer.token,
farmer.address,
farmer.size,
'',
'',
farmer,
farmer.chunk_dir)
patch.return_value.run_async.assert_called_with(
True, farmer.number)
self.assertTrue(w.called)
self.assertTrue(patch.return_value.connect.called)
self.assertEqual(farmer
.state['nodes'][patch.return_value
.server]['token'],
patch.return_value.token)
self.assertEqual(farmer
.state['nodes'][patch.return_value
.server]['address'],
patch.return_value.address)
self.assertTrue(s.called)
def test_run_nonexistent_token_reconnect(self):
with mock.patch('downstream_farmer.farmer.restore', autospec=True) \
as r, mock.patch('six.moves.urllib.request.urlopen') as patch:
r.side_effect = MockRestore(
{'historyfile': dict(), 'identityfile': dict()})
farmer = Farmer(self.test_args)
with mock.patch('downstream_farmer.farmer.DownstreamClient') as patch,\
mock.patch('downstream_farmer.farmer.save', autospec=True),\
mock.patch.object(Farmer, 'wait_for_shutdown'):
patch.return_value.connect.side_effect = \
MockRaiseOnFirstCall(
DownstreamError('Unable to connect: Nonexistent token.'))
farmer.run(True)
self.assertEqual(patch.return_value.connect.call_count, 2)
def test_run_unable_to_connect(self):
with mock.patch('downstream_farmer.farmer.restore', autospec=True) \
as r, mock.patch('six.moves.urllib.request.urlopen') as patch:
r.side_effect = MockRestore(
{'historyfile': dict(), 'identityfile': dict()})
farmer = Farmer(self.test_args)
with mock.patch('downstream_farmer.farmer.DownstreamClient') as patch:
patch.return_value.connect.side_effect = DownstreamError(
'test error')
with self.assertRaises(DownstreamError) as ex:
farmer.run(True)
self.assertEqual(str(ex.exception), 'test error')
def test_prepare_chunk_dir(self):
with mock.patch('downstream_farmer.farmer.restore', autospec=True) \
as r, mock.patch('six.moves.urllib.request.urlopen'),\
mock.patch('downstream_farmer.farmer.os') as os_patch:
r.side_effect = MockRestore(
{'historyfile': dict(), 'identityfile': dict()})
os_patch.path.isdir.return_value = False
farmer = Farmer(self.test_args)
os_patch.mkdir.assert_called_with(farmer.chunk_dir)
def test_prepare_chunk_fail(self):
with mock.patch('downstream_farmer.farmer.restore', autospec=True) \
as r, mock.patch('six.moves.urllib.request.urlopen'),\
mock.patch('downstream_farmer.farmer.os') as os_patch:
r.side_effect = MockRestore(
{'historyfile': dict(), 'identityfile': dict()})
os_patch.path.isdir.return_value = False
os_patch.mkdir.side_effect = RuntimeError('test exception')
with self.assertRaises(DownstreamError) as ex:
Farmer(self.test_args)
self.assertEqual(
str(ex.exception),
'Chunk directory could not be created: test exception')
class TestContract(unittest.TestCase):
def setUp(self):
self.challenge = Heartbeat.challenge_type().\
fromdict(
MockValues
.get_challenges_response['challenges'][0]['challenge'])
self.heartbeat = Heartbeat.fromdict(
MockValues.connect_response['heartbeat'])
self.tag = Heartbeat.tag_type().fromdict(
MockValues.get_chunks_response['chunks'][0]['tag'])
self.expiration = datetime.utcnow(
) + timedelta(int(MockValues.get_chunks_response['chunks'][0]['due']))
self.client = mock.MagicMock()
self.manager = ThreadManager()
self.test_hash = 'hash'
self.test_size = 100
self.test_seed = 'seed'
self.contract = DownstreamContract(self.client,
self.test_hash,
self.test_seed,
self.test_size,
self.challenge,
self.expiration,
self.tag,
self.manager,
os.path.join('data', 'chunks'))
self.contract.generate_data()
def tearDown(self):
self.contract.cleanup_data()
def test_initialization(self):
self.assertEqual(self.contract.client, self.client)
self.assertEqual(self.contract.hash, self.test_hash)
self.assertEqual(self.contract.seed, self.test_seed)
self.assertEqual(self.contract.size, self.test_size)
self.assertEqual(self.contract.challenge, self.challenge)
self.assertEqual(self.contract.expiration, self.expiration)
self.assertEqual(self.contract.tag, self.tag)
self.assertEqual(self.contract.answered, False)
self.assertEqual(
self.contract.path, os.path.join('data', 'chunks', self.test_hash))
def test_repr(self):
self.assertEqual(str(self.contract), self.contract.hash)
class MockContractShutdown(object):
def __init__(self, manager):
self.manager = manager
def __call__(self, arg=None):
self.manager.running = False
class MockShutdownAndRaise(MockContractShutdown):
def __init__(self, manager, exc):
MockContractShutdown.__init__(self, manager)
self.exception = exc
def __call__(self, arg=None):
MockContractShutdown.__call__(self)
raise self.exception
class MockContractWait(object):
def __init__(self, contract):
self.contract = contract
def __call__(self, arg=None):
self.contract.time_remaining.return_value = 0
class AddContractMock(object):
def __init__(self, client, manager_to_shutdown=None):
self.client = client
self.manager = manager_to_shutdown
def __call__(self, contract):
self.client.get_total_size.return_value += contract.size
if (self.manager is not None):
self.manager.running = False
class TestClient(unittest.TestCase):
def setUp(self):
self.server_url = 'https://test.url/'
self.api_path = '/api/downstream/v1'
self.size = 100
self.address = base58.b58encode_check(b'\x00' + os.urandom(20))
self.token = binascii.hexlify(os.urandom(16)).decode('ascii')
self.msg = ''
self.sig = ''
self.thread_manager = ShellApplication()
self.contract_thread = ManagedThread()
self.chunk_dir = os.path.join('data', 'chunks')
self.client = DownstreamClient(self.server_url,
self.token,
self.address,
self.size,
self.msg,
self.sig,
self.thread_manager,
self.chunk_dir)
self.client.session = mock.MagicMock()
self.test_contract = \
DownstreamContract(self.client,
MockValues.get_chunks_response[
'chunks'][0]['file_hash'],
MockValues.get_chunks_response[
'chunks'][0]['seed'],
MockValues.get_chunks_response[
'chunks'][0]['size'],
Heartbeat.challenge_type().fromdict(
MockValues
.get_chunks_response
['chunks'][0]['challenge']),
datetime.utcnow() + timedelta(
seconds=int(
MockValues
.get_chunks_response
['chunks'][0]['due'])),
Heartbeat.tag_type().fromdict(
MockValues
.get_chunks_response
['chunks'][0]['tag']),
self.thread_manager,
self.chunk_dir)
self.test_heartbeat = Heartbeat.fromdict(
MockValues.connect_response['heartbeat'])
def tearDown(self):
pass
def test_initialization(self):
self.assertEqual(self.client.server, self.server_url.strip('/'))
self.assertEqual(self.client.address, self.address)
self.assertEqual(self.client.token, self.token)
self.assertEqual(self.client.desired_size, self.size)
self.assertIsNone(self.client.heartbeat)
def test_connect_no_token_no_address(self):
self.client.address = None
self.client.token = None
with self.assertRaises(DownstreamError) as ex:
self.client.connect()
self.assertEqual(
str(ex.exception), 'If no token is specified, address must be.')
def test_connect_failed(self):
with mock.patch('downstream_farmer.client.handle_json_response')\
as hp:
hp.side_effect = DownstreamError('test error')
with self.assertRaises(DownstreamError) as ex:
self.client.connect()
self.assertEqual(
str(ex.exception), 'Unable to connect: test error')
def test_connect_malformed(self):
inst = self.client.session.get.return_value
inst.json.return_value = {"invalid": "dict"}
with self.assertRaises(DownstreamError) as ex:
self.client.connect()
self.assertEqual(
str(ex.exception), 'Malformed response from server.')
def test_connect_invalid_heartbeat(self):
inst = self.client.session.get.return_value
inst.json.return_value = {"heartbeat": "test heartbeat",
"token": "test token",
"type": "invalid type"}
with self.assertRaises(DownstreamError) as ex:
self.client.connect()
self.assertEqual(str(ex.exception), 'Unknown Heartbeat Type')
def test_connect_working_new(self):
self.client.token = None
self.client.session.get.return_value.json.return_value \
= MockValues.connect_response
self.client.connect()
self.client.session.get.assert_called_with(
'{0}/new/{1}'.format(self.server_url.strip('/') + self.api_path,
self.address), verify=None)
self.assertEqual(
self.client.token, MockValues.connect_response['token'])
self.assertEqual(self.client.heartbeat,
Heartbeat
.fromdict(MockValues.connect_response['heartbeat']))
def test_connect_working(self):
self.client.session.get.return_value.json.return_value \
= MockValues.connect_response
self.client.connect()
self.client.session.get.assert_called_with('{0}/heartbeat/{1}'.format(
self.server_url.strip('/') + self.api_path, self.token),
verify=None)
self.assertEqual(
self.client.token, MockValues.connect_response['token'])
self.assertEqual(self.client.heartbeat,
Heartbeat
.fromdict(MockValues.connect_response['heartbeat']))
def test_connect_sign(self):
self.client.msg = 'test message'
self.client.sig = 'HyzVUenXXo4pa+kgm1vS8PNJM83eIXFC5r0q86FGbqFcdla6rcw'
'72/ciXiEPfjli3ENfwWuESHhv6K9esI0dl5I='
self.client.address = '19qVgG8C6eXwKMMyvVegsi3xCsKyk3Z3jV'
self.client.token = None
self.client.session.post.return_value.json.return_value \
= MockValues.connect_response
self.client.connect()
self.client.session.post.\
assert_called_with(
'{0}/new/{1}'
.format(self.server_url.strip('/') + self.api_path,
self.client.address),
data=json.dumps({
"message": self.client.msg,
"signature": self.client.sig
}),
headers={
'Content-Type': 'application/json'
},
verify=None)
self.assertEqual(
self.client.token, MockValues.connect_response['token'])
self.assertEqual(self.client.heartbeat,
Heartbeat.fromdict(MockValues
.connect_response['heartbeat']))
def test_get_contract_no_token(self):
with mock.patch('downstream_farmer.client.handle_json_response')\
as hp:
hp.side_effect = DownstreamError('test error')
with self.assertRaises(DownstreamError) as ex:
self.client._get_contracts()
self.assertEqual(
str(ex.exception), 'Unable to get contracts: test error')
def test_get_contract_malformed(self):
patch = self.client.session.get
patch.return_value.json.return_value = {"invalid": "dict"}
with self.assertRaises(DownstreamError) as ex:
self.client._get_contracts()
self.assertEqual(
str(ex.exception), 'Malformed response from server.')
def test_get_contracts_working(self):
self.client.heartbeat = self.test_heartbeat
patch = self.client.session.get
inst = patch.return_value
inst.json.return_value = MockValues.get_chunks_response.copy()
contracts = self.client._get_contracts(100)
self.assertEqual(
contracts[0].hash, self.test_contract.hash)
self.assertEqual(
contracts[0].seed, self.test_contract.seed)
self.assertEqual(
contracts[0].size, self.test_contract.size)
self.assertEqual(
contracts[0].challenge, self.test_contract.challenge)
self.assertAlmostEqual((contracts[0].expiration -
self.test_contract.expiration)
.total_seconds(), 0, delta=1)
self.assertEqual(contracts[0].tag, self.test_contract.tag)
def test_get_contract_no_chunks_available(self):
self.client.heartbeat = self.test_heartbeat
with mock.patch(
'downstream_farmer.client.handle_json_response') as hpatch:
hpatch.return_value = dict(chunks=[])
contracts = self.client._get_contracts()
self.assertEqual(len(contracts), 0)
def setup_run_mocks(self):
self.client.thread_manager = mock.MagicMock()
self.client.thread_manager.running = True
self.client.worker_pool = mock.MagicMock()
self.client._get_contracts = mock.MagicMock()
self.client._get_contracts.return_value = [self.test_contract]
self.client._add_contract = mock.MagicMock()
self.client.contract_thread = mock.MagicMock()
self.client.get_total_size = mock.MagicMock()
self.client.get_total_size.return_value = 0
def test_run_contract_manager(self):
self.setup_run_mocks()
self.client.thread_manager.sleep.side_effect = \
MockContractShutdown(self.client.thread_manager)
self.client._add_contract.side_effect = AddContractMock(self.client)
self.client._run_contract_manager()
self.assertFalse(self.client.thread_manager.signal_shutdown.called)
self.client._add_contract.assert_called_with(self.test_contract)
def test_run_contract_manager_obtain_fail_no_retry(self):
self.setup_run_mocks()
self.client.contract_count = mock.MagicMock()
self.client.contract_count.return_value = 0
self.client._get_contracts.side_effect = DownstreamError('test error')
self.client.thread_manager.signal_shutdown.side_effect = \
MockContractShutdown(self.client.thread_manager)
with self.assertRaises(DownstreamError) as ex:
self.client._run_contract_manager()
self.assertEqual(str(ex.exception), 'test error')
def test_run_contract_manager_obtain_fail_retry(self):
self.setup_run_mocks()
self.client.contract_count = mock.MagicMock()
self.client.contract_count.return_value = 0
self.client._get_contracts.side_effect = \
[DownstreamError('test error'), [self.test_contract]]
self.client._add_contract.side_effect = AddContractMock(self.client)
self.client.thread_manager.sleep.side_effect = \
MockContractShutdown(self.client.thread_manager)
self.client._run_contract_manager(True)
self.assertFalse(self.client.thread_manager.signal_shutdown.called)
def test_run_contract_manager_shutdown_during_acquisition(self):
self.setup_run_mocks()
self.client._add_contract.side_effect = \
AddContractMock(self.client, self.client.thread_manager)
self.client._run_contract_manager()
self.assertFalse(self.client.thread_manager.signal_shutdown.called)
def test_run_contract_manager_number_requirement(self):
self.setup_run_mocks()
self.client._add_contract.side_effect = \
AddContractMock(self.client)
self.client.heartbeat_count = 1
self.client.thread_manager.signal_shutdown.side_effect = \
MockContractShutdown(self.client.thread_manager)
self.client.desired_heartbeats = 1
self.client._run_contract_manager()
self.assertTrue(self.client.thread_manager.signal_shutdown.called)
def test_run_async(self):
self.client.thread_manager = mock.MagicMock()
self.contract_thread = mock.MagicMock()
self.client.thread_manager.create_thread.return_value = \
self.contract_thread
self.client.run_async()
self.assertTrue(self.contract_thread.start.called)
def test_set_cert_path(self):
test_path = 'testpath'
self.client._set_requests_verify_arg = mock.MagicMock()
self.client.set_cert_path(test_path)
self.assertEqual(self.client.cert_path, test_path)
self.assertTrue(self.client._set_requests_verify_arg.called)
def test_set_verify_cert(self):
val = not self.client.verify_cert
self.client._set_requests_verify_arg = mock.MagicMock()
self.client.set_verify_cert(val)
self.assertEqual(self.client.verify_cert, val)
self.assertTrue(self.client._set_requests_verify_arg.called)
def test_set_requests_verify_arg_false(self):
self.client.verify_cert = False
self.client.requests_verify_arg = True
self.client._set_requests_verify_arg()
self.assertFalse(self.client.requests_verify_arg)
class TestExceptions(unittest.TestCase):
def test_downstream_error(self):
e = DownstreamError('Test Exception')
self.assertEqual(str(e), 'Test Exception')
class TestSmartFormatter(unittest.TestCase):
def setUp(self):
self.width = 80
def test_raw(self):
formatter = shell.SmartFormatter(None)
raw_string = 'R|sample text blah this is a long piece of '\
'text that should be split over lines but wont be because its raw'\
' text blah!!! and will split over new lines\nthis should be a '\
'on a new line'
value = formatter._split_lines(raw_string, self.width)
self.assertEqual(value, raw_string[2:].splitlines())
def test_normal(self):
formatter = shell.SmartFormatter(None)
raw_string = 'This is a raw string that will be split over lines '\
'because it will go into the HelpFormatter. but This string '\
'needs to be longer'\
'than 80 chars to split on the lines'
value = formatter._split_lines(raw_string, self.width)
self.assertEqual(
value, HelpFormatter._split_lines(formatter,
raw_string,
self.width))
class TestShell(unittest.TestCase):
def setUp(self):
self._old_argv = sys.argv
sys.argv = [
'downstream'
]
def tearDown(self):
sys.argv = self._old_argv
def test_fail_exit(self):
with self.assertRaises(SystemExit):
shell.fail_exit('Test')
def test_eval_args_run(self):
with mock.patch('downstream_farmer.shell.Farmer') as farmer:
shell.eval_args(mock.MagicMock())
self.assertTrue(farmer.called)
self.assertTrue(farmer.return_value.run.called)
def test_eval_args_downstream_error(self):
with mock.patch('downstream_farmer.shell.Farmer') as farmer:
farmer.side_effect = DownstreamError('error')
with self.assertRaises(SystemExit):
shell.eval_args(None)
def test_eval_args_exception(self):
with mock.patch('downstream_farmer.shell.Farmer') as farmer:
farmer.side_effect = Exception('error')
with self.assertRaises(SystemExit):
shell.eval_args(None)
def test_eval_args_catchall(self):
with mock.patch('downstream_farmer.shell.Farmer') as farmer:
farmer.side_effect = BaseException('error')
with self.assertRaises(SystemExit):
shell.eval_args(None)
def test_parse_args(self):
args = shell.parse_args()
self.assertIsInstance(args, Namespace)
def test_parse_args_version(self):
with self.assertRaises(SystemExit):
sys.argv.append('--version')
shell.parse_args()
def test_parse_args_number(self):
sys.argv.append('--number')
sys.argv.append('1')
args = shell.parse_args()
self.assertEqual(args.number, int(sys.argv[2]))
def test_parse_args_number_default(self):
args = shell.parse_args()
self.assertEqual(args.number, None)
def test_parse_args_path(self):
sys.argv.append('--history')
sys.argv.append('testpath')
args = shell.parse_args()
self.assertEqual(args.history, sys.argv[2])
def test_parse_args_history_default(self):
args = shell.parse_args()
self.assertEqual(args.history, os.path.join('data', 'history.json'))
def test_parse_args_size(self):
sys.argv.append('--size')
sys.argv.append('10')
args = shell.parse_args()
self.assertEqual(args.size, int(sys.argv[2]))
def test_parse_args_size_default(self):
args = shell.parse_args()
self.assertEqual(args.size, 33554432)
def test_parse_args_address(self):
sys.argv.append('--address')
sys.argv.append('testaddress')
args = shell.parse_args()
self.assertEqual(args.address, sys.argv[2])
def test_parse_args_address_default(self):
args = shell.parse_args()
self.assertEqual(args.address, None)
def test_parse_args_token(self):
sys.argv.append('--token')
sys.argv.append('testtoken')
args = shell.parse_args()
self.assertEqual(args.token, sys.argv[2])
def test_parse_args_token_default(self):
args = shell.parse_args()
self.assertEqual(args.token, None)
def test_parse_args_url(self):
sys.argv.append('testurl')
args = shell.parse_args()
self.assertEqual(args.node_url, sys.argv[1])
def test_parse_args_url_default(self):
args = shell.parse_args()
self.assertEqual(args.node_url, None)
def test_main(self):
with mock.patch('downstream_farmer.shell.parse_args') as pa:
with mock.patch('downstream_farmer.shell.eval_args') as ea:
shell.main()
self.assertTrue(pa.called)
self.assertTrue(ea.called)
class MockValues:
connect_response = {
"heartbeat": "AQoAAACAAAAAgAAAAJCTCchnuw8nE9FbjUyJVNNzjQumBHHw7iFL5Ply"
"4vHQvkqOqcgc5XKXgWVaJGCs1F+oI68zL9Ir9+q0BkA5WadDq5uz0Cot"
"sY8Pad8UemCLvLGNlnkavsbn0dXk7/0QL5KYGardu9m5zWtQEagdvl86"
"tSbksec1B5Y9K1S5hGlr",
"token": "b45a3e2932c87474cb1bd7e642cf792b",
"type": "Swizzle"
}
get_chunks_response = {"chunks": [{
"challenge": "AQAAACAAAACJwjEuYPkbnGOppNVgG0Xc5GKgp0g2kGN2bMCssbMBwIAA"
"AACQkwnIZ7sPJxPRW41MiVTTc40LpgRx8O4hS+T5cuLx0L5KjqnIHOVy"
"l4FlWiRgrNRfqCOvMy/SK/fqtAZAOVmnQ6ubs9AqLbGPD2nfFHpgi7yx"
"jZZ5Gr7G59HV5O/9EC+SmBmq3bvZuc1rUBGoHb5fOrUm5LHnNQeWPStU"
"uYRpaw==",
"due": "60",
"file_hash": "89ca8e5f02e64694bf889d49a9b7986f201900e6637e0e7349282a85"
"91ce7732",
"seed": "eb1bb0f7cd24720d456193cca8c42edb",
"size": 100,
"tag": "AQAAAIAAAABqXU8BK1mOXFG0mK+X1lWNZ39AmYe1M4JsbIz36wC0PvvcWY+URw"
"+BYBlFk5N1+X5VI4F+3sDYYy0jE7mgVCh7kNnOZ/mAYtffFh7izOOS4HHuzWIm"
"cOgaVeBL0/ngSPLPYUhFF5uTzKoYUr+SheQDYcuOCg8qivXZGOL6Hv1WVQ=="
}]}
get_challenges_response = {"challenges": [{
"file_hash": "89ca8e5f02e64694bf889d49a9b7986f201900e6637e0e7349282a85"
"91ce7732",
"challenge": "AQAAACAAAAAs/0pRrQ00cWS86II/eAufStyqrjf0wSJ941EjtrLo94AA"
"AABSnAK49Tm7F/4HkQuvdJj1WdisL9OEuMMl9uYMxIp8aXvDqkI/NP4r"
"ix6rREa1Jh6pvH6Mb4DpVHEjDMzVIOKEKV8USKndUq2aNiYf2NqQ1Iw0"
"XkNFsoSgZD10miN8YtatUNu+8gUkT6cv54DUrruo9JTIpXsIqu0BNifu"
"FU58Vw==",
"due": "60",
"answered": True
}]}
| mit | -470,276,516,766,062,660 | 38.904605 | 79 | 0.578374 | false |
PMR2/pmr2.oauth | pmr2/oauth/interfaces.py | 1 | 12747 | import zope.interface
import zope.schema
# XXX exceptions from upstream
from oauthlib.oauth1.rfc5849.errors import OAuth1Error
from pmr2.oauth import MessageFactory as _
class KeyExistsError(KeyError):
__doc__ = "key exists error"
class BaseInvalidError(KeyError):
__doc__ = "base invalid error."
class TokenInvalidError(BaseInvalidError):
__doc__ = "invalid token."
class ConsumerInvalidError(BaseInvalidError):
__doc__ = "invalid client."
class RequestInvalidError(BaseInvalidError):
__doc__ = "invalid request."
class BaseValueError(ValueError):
__doc__ = "basic value error"
class NotRequestTokenError(TokenInvalidError):
__doc__ = "Not request token"
class NotAccessTokenError(TokenInvalidError):
__doc__ = "Not access token"
class ExpiredTokenError(TokenInvalidError):
__doc__ = "Expired token"
class CallbackValueError(BaseValueError):
__doc__ = "callback value error"
class NonceValueError(BaseValueError):
__doc__ = "nonce value error"
class InvalidScopeError(BaseValueError):
__doc__ = "invalid scope."
class IOAuthRequestValidatorAdapter(zope.interface.Interface):
"""
Interface for the OAuth adapter.
"""
def __call__():
"""
Return a boolean value to determine whether access was granted.
"""
class IOAuthPlugin(zope.interface.Interface):
"""\
The OAuth plugin.
"""
def extractOAuthCredentials(request):
"""\
Extract the OAuth credentials from the request, for processing
by Plone PAS.
"""
class IConsumer(zope.interface.Interface):
"""\
An OAuth client credential.
"""
key = zope.schema.ASCIILine(
title=_(u'Client Identifier'),
description=_(u'The unique identifier for this client'),
required=True,
)
secret = zope.schema.ASCIILine(
title=_(u'Client Shared-Secret'),
description=_(u'The secret that is shared between the client and the '
'service provider.'),
required=True,
)
title = zope.schema.TextLine(
title=_(u'Client Name'),
description=_(u'This is the name of the application that will be '
'using this set of client credentials, and serves as '
'the identifier that will be presented to resource '
'owners during the authorization process.'),
required=False,
)
domain = zope.schema.TextLine(
title=_(u'Domain Name'),
description=_(u'If this client is able to receive callbacks, please '
'enter its doamin name here as callbacks will be '
'validated against this value. Otherwise leave this as '
'blank.'),
required=False,
)
def validate():
"""
Self validation.
"""
class IConsumerManager(zope.interface.Interface):
"""\
Interface for the client management.
"""
def add(consumer):
"""\
Add a client.
"""
def check(consumer):
"""\
Check for validity of input client.
"""
def get(consumer_key, default=None):
"""\
Return client, identified by consumer_key.
"""
def getAllKeys():
"""\
Return all client keys tracked by this client manager.
"""
def getValidated(consumer_key, default=None):
"""\
Return a client only if it is a validated one.
This will be used when possible to allow further checks by
alternative implementations.
"""
def remove(consumer):
"""\
Remove client.
"""
class IScopeManager(zope.interface.Interface):
"""\
Scope Manager
A manager that simplifies the handling of scopes, which place limits
on what an authenticated token can access.
"""
def setScope(key, scope):
"""
Set a scope identified by key.
"""
def getScope(key, scope):
"""
Get a scope identified by key.
"""
def popScope(key, scope):
"""
Pop out a scope identified by key
"""
def setClientScope(client_key, scope):
"""
Set the scope provided by client, referenced by client_key.
"""
def setAccessScope(access_key, scope):
"""
Set the scope provided by access, referenced by access_key.
"""
def getClientScope(client_key, default):
"""
Get the scope for the provided client_key.
"""
def getAccessScope(access_key, default):
"""
Get the scope for the provided access_key.
"""
def delClientScope(client_key):
"""
Delete the scope for the provided client_key.
"""
def delAccessScope(access_key):
"""
Delete the scope for the provided access_key.
"""
def requestScope(request_key, rawscope):
"""
Request a scope for the temporary credentials identified by the
``request_key``.
request_key
the generated request key.
rawscope
the raw scope string sent by the client.
Return True if the rawscope is successfully stored as a scope
with the request_key, False otherwise.
The actual scope object can be retrieved by calling
`self.getScope(request_key)` if this was successful.
"""
def validate(request, client_key, access_key,
accessed, container, name, value):
"""
Validate the scope against the given context with the given
client and owner.
request
the request object.
client_key
the client (consumer) key.
access_key
the access key identifying a given token granted by a
resource owner.
accessed
            the immediate object accessed by the client before reaching
            the value
container
the real container of the value
name
the name used by the client to access the value.
value
the value accessed by the client.
The latter four fields are normally called by keywords.
"""
class IDefaultScopeManager(IScopeManager):
"""
Marker interface for the default scope manager.
"""
class IContentTypeScopeManager(IScopeManager):
"""
A scope manager based on content types.
A scope manager validates the requested object and the name with a
content type profile specific to the client and/or resource access
key if available, or the default profile if not.
"""
default_mapping_id = zope.schema.Int(
title=_(u'Default Mapping ID'),
required=True,
default=0,
)
def resolveProfile(client_key, access_key):
"""
        Resolve the provided client_key and access_key into a validation
profile.
"""
def resolveTarget(accessed, name):
"""
Resolve the accessed item and name into a target for the next
method.
"""
def validateTargetWithProfile(accessed_typeid, subpath, profile):
"""
The scope value will resolve into a profile which is used to
validate against the provided parameters.
"""
class IContentTypeScopeProfile(zope.interface.Interface):
"""
Interface for the scope profile and editor.
"""
title = zope.schema.TextLine(
title=_(u'Title'),
description=_(
u'Brief description about this scope profile.'),
required=True,
)
description = zope.schema.Text(
title=_(u'Description'),
description=_(
u'Detailed description of the rights granted by this scope.'),
required=False,
)
methods = zope.schema.ASCIILine(
title=_(u'Permitted HTTP Methods'),
description=_(
u'Whitespace delimited list of permitted HTTP methods for the '
'subpaths below.'),
required=True,
default='GET HEAD OPTIONS',
)
mapping = zope.schema.Dict(
title=_(u'Mapping'),
description=_(u'A mapping for each of the following portal types to '
'a list of permitted subpaths.'),
key_type=zope.schema.ASCIILine(
title=_(u'Portal Type')
),
value_type=zope.schema.List(
title=_(u'Permitted subpaths'),
value_type=zope.schema.ASCIILine(title=_(u'Subpath')),
required=False,
),
)
class IToken(zope.interface.Interface):
"""\
An OAuth token.
"""
key = zope.schema.ASCIILine(
title=_(u'Key'),
description=_(u'Token key'),
required=True,
)
secret = zope.schema.ASCIILine(
title=_(u'Secret'),
description=_(u'Token secret'),
required=True,
)
callback = zope.schema.ASCIILine(
title=_(u'Callback'),
required=False,
)
verifier = zope.schema.ASCIILine(
title=_(u'Verifier'),
required=True,
)
# other requirements
access = zope.schema.Bool(
title=_(u'Access Permitted'),
description=_(u'Determines if this can be used to access content.'),
default=False,
required=True,
)
user = zope.schema.ASCIILine(
title=_(u'User ID'),
description=_(u'The user id associated with this token.'),
required=False,
default=None,
)
consumer_key = zope.schema.ASCIILine(
title=_(u'Client Key'),
description=_(u'The client key associated with this token'),
required=False,
default=None,
)
timestamp = zope.schema.Int(
title=_(u'Timestamp'),
description=_(u'Creation timestamp of this token'),
)
expiry = zope.schema.Int(
title=_(u'Expiry'),
description=_(u'Expiry timestamp for this token'),
)
def validate():
"""
Self validation.
"""
class ITokenManager(zope.interface.Interface):
"""\
Token manager utility
"""
def add(token):
"""\
Add a token.
"""
def generateRequestToken(consumer, request):
"""\
Generate a request token, using client and request.
"""
def generateAccessToken(consumer, request):
"""\
Generate an access token.
"""
def claimRequestToken(token, user):
"""\
Token claimed by user.
"""
def get(token, default=None):
"""\
Get a token by token.
Input could be a token, or a key. Returns the same token
identified by the key of the input token or the input key.
"""
def getRequestToken(token, default=False):
"""\
Return request token identified by token.
        Raises NotRequestTokenError when token is not a request token.
Raises InvalidTokenError if internal consistency (invariants)
are violated.
If token is not found and default value is false,
InvalidTokenError should be raised also.
"""
def getAccessToken(token, default=False):
"""\
Return access token identified by token.
Raises NotAccessTokenError when token is not an access token.
Raises InvalidTokenError if internal consistency (invariants)
are violated.
If token is not found and default value is false,
InvalidTokenError should be raised also.
"""
def getTokensForUser(user):
"""\
Return a list of token keys for a user.
"""
def remove(token):
"""\
Remove token.
"""
# Other management interfaces
class ICallbackManager(zope.interface.Interface):
"""
Callback manager.
Used to verify the validity of callback URIs.
"""
def validate(consumer, token):
"""
Check that the callbacks are valid against both the client and
the token. A more thorough implementation should allow multiple
hosts for clients, matching against the tokens issued, instead
of just relying on the helper attribute provided by client.
token
The token to validate against.
consumer
The client to validate against.
"""
class INonceManager(zope.interface.Interface):
"""\
Nonce manager.
If nonce must be checked specifically, implement this manager.
"""
def check(timestamp, nonce):
"""\
Check that this nonce can be used.
"""
class _IDynamicSchemaInterface(zope.interface.Interface):
"""
Placeholder
"""
| gpl-2.0 | -4,668,294,694,185,866,000 | 23.560694 | 79 | 0.593787 | false |
RianFuro/vint | vint/linting/formatter/formatter.py | 1 | 2751 | from pathlib import Path
from ansicolor import Colors, colorize
DEFAULT_FORMAT = '{file_path}:{line_number}:{column_number}: {description} (see {reference})'
FORMAT_COLOR_MAP = {
'file_path': Colors.Red,
'file_name': Colors.Red,
'line_number': Colors.White,
'column_number': Colors.White,
'severity': Colors.Red,
'description': Colors.White,
'policy_name': Colors.White,
'reference': Colors.White,
}
class Formatter(object):
def __init__(self, config_dict):
if 'cmdargs' in config_dict:
cmdargs = config_dict['cmdargs']
else:
cmdargs = {}
if 'format' in cmdargs and cmdargs['format'] is not None:
self._format = cmdargs['format']
else:
self._format = DEFAULT_FORMAT
if 'color' in cmdargs and cmdargs['color'] is not None:
self._should_be_colorized = cmdargs['color']
else:
self._should_be_colorized = False
def _sort_violations(self, violations):
return sorted(violations, key=lambda violation: (violation['position']['path'],
violation['position']['line']))
def format_violations(self, violations):
sorted_violations = self._sort_violations(violations)
formatted_lines = map(self.format_violation, sorted_violations)
return '\n'.join(formatted_lines)
def format_violation(self, violation):
if self._should_be_colorized:
formatter_map = self._get_colorize_formatter_map(violation)
else:
formatter_map = self._get_formatter_map(violation)
formatted_line = self._format.format(**formatter_map)
return formatted_line
def _get_colorize_formatter_map(self, violation):
formatter_map = self._get_formatter_map(violation)
colorized_formatter_map = {}
for key, value in formatter_map.items():
if key in FORMAT_COLOR_MAP:
Color = FORMAT_COLOR_MAP[key]
colorized_formatter_map[key] = colorize(str(value), Color())
else:
colorized_formatter_map[key] = value
return colorized_formatter_map
def _get_formatter_map(self, violation):
file_path = Path(violation['position']['path'])
return {
'file_path': file_path,
'file_name': file_path.name,
'line_number': violation['position']['line'],
'column_number': violation['position']['column'],
'severity': violation['level'].name.lower(),
'description': violation['description'],
'policy_name': violation['name'],
'reference': violation['reference'],
}
| mit | -2,305,773,993,742,480,000 | 31.364706 | 93 | 0.588513 | false |
manashmndl/LearningPyQt | LatexTest/LatexTest/LatexTest.py | 1 | 4860 | import sys
import matplotlib as mpl
from matplotlib.backends.backend_agg import FigureCanvasAgg
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import *
from PyQt4.QtGui import *
def mathTex_to_QPixmap(mathTex, fs):
#---- set up a mpl figure instance ----
fig = mpl.figure.Figure()
fig.patch.set_facecolor('none')
fig.set_canvas(FigureCanvasAgg(fig))
renderer = fig.canvas.get_renderer()
#---- plot the mathTex expression ----
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('off')
ax.patch.set_facecolor('none')
t = ax.text(0, 0, mathTex, ha='left', va='bottom', fontsize=fs)
#---- fit figure size to text artist ----
fwidth, fheight = fig.get_size_inches()
fig_bbox = fig.get_window_extent(renderer)
text_bbox = t.get_window_extent(renderer)
tight_fwidth = text_bbox.width * fwidth / fig_bbox.width
tight_fheight = text_bbox.height * fheight / fig_bbox.height
fig.set_size_inches(tight_fwidth, tight_fheight)
#---- convert mpl figure to QPixmap ----
buf, size = fig.canvas.print_to_buffer()
qimage = QtGui.QImage.rgbSwapped(QtGui.QImage(buf, size[0], size[1],
QtGui.QImage.Format_ARGB32))
qpixmap = QtGui.QPixmap(qimage)
return qpixmap
def window(latex, size = 15):
app = QApplication(sys.argv)
win = QWidget()
l1 = QLabel()
l1.setPixmap(mathTex_to_QPixmap(latex , size))
vbox = QVBoxLayout()
vbox.addWidget(l1)
win.setLayout(vbox)
win.setWindowTitle("QPixmap Demo")
win.show()
sys.exit(app.exec_())
class MyQTableWidget(QtGui.QTableWidget):
def __init__(self, parent=None):
super(MyQTableWidget, self).__init__(parent)
self.setHorizontalHeader(MyHorizHeader(self))
def setHorizontalHeaderLabels(self, headerLabels, fontsize):
qpixmaps = []
indx = 0
for labels in headerLabels:
qpixmaps.append(mathTex_to_QPixmap(labels, fontsize))
self.setColumnWidth(indx, qpixmaps[indx].size().width() + 16)
indx += 1
self.horizontalHeader().qpixmaps = qpixmaps
super(MyQTableWidget, self).setHorizontalHeaderLabels(headerLabels)
class MyHorizHeader(QtGui.QHeaderView):
def __init__(self, parent):
super(MyHorizHeader, self).__init__(QtCore.Qt.Horizontal, parent)
self.setClickable(True)
self.setStretchLastSection(True)
self.qpixmaps = []
def paintSection(self, painter, rect, logicalIndex):
if not rect.isValid():
return
#------------------------------ paint section (without the label) ----
opt = QtGui.QStyleOptionHeader()
self.initStyleOption(opt)
opt.rect = rect
opt.section = logicalIndex
opt.text = ""
#---- mouse over highlight ----
mouse_pos = self.mapFromGlobal(QtGui.QCursor.pos())
if rect.contains(mouse_pos):
opt.state |= QtGui.QStyle.State_MouseOver
#---- paint ----
painter.save()
self.style().drawControl(QtGui.QStyle.CE_Header, opt, painter, self)
painter.restore()
#------------------------------------------- paint mathText label ----
qpixmap = self.qpixmaps[logicalIndex]
#---- centering ----
xpix = (rect.width() - qpixmap.size().width()) / 2. + rect.x()
ypix = (rect.height() - qpixmap.size().height()) / 2.
#---- paint ----
rect = QtCore.QRect(xpix, ypix, qpixmap.size().width(),
qpixmap.size().height())
painter.drawPixmap(rect, qpixmap)
def sizeHint(self):
baseSize = QtGui.QHeaderView.sizeHint(self)
baseHeight = baseSize.height()
if len(self.qpixmaps):
for pixmap in self.qpixmaps:
baseHeight = max(pixmap.height() + 8, baseHeight)
baseSize.setHeight(baseHeight)
self.parentWidget().repaint()
return baseSize
if __name__ == '__main__':
lat = '$C_{soil}=(1 - n) C_m + \\theta_w C_w$'
window(lat, 35)
#app = QtGui.QApplication(sys.argv)
#w = MyQTableWidget()
#w.verticalHeader().hide()
#headerLabels = [
# '$C_{soil}=(1 - n) C_m + \\theta_w C_w$',
# '$k_{soil}=\\frac{\\sum f_j k_j \\theta_j}{\\sum f_j \\theta_j}$',
# '$\\lambda_{soil}=k_{soil} / C_{soil}$',
# '$a = \\frac{m}{F}$'
# ]
#w.setColumnCount(len(headerLabels))
#w.setHorizontalHeaderLabels(headerLabels, 25)
#w.setRowCount(3)
#w.setAlternatingRowColors(True)
#k = 1
#for j in range(3):
# for i in range(3):
# w.setItem(i, j, QtGui.QTableWidgetItem('Value %i' % (k)))
# k += 1
#w.show()
#w.resize(700, 200)
#sys.exit(app.exec_()) | mit | -8,854,713,075,863,133,000 | 26.308989 | 78 | 0.574074 | false |
pkuzc/convex_optimization | ADMM_method.py | 1 | 5123 | class ADMM_method():
import numpy as np
def __init__(self, A, b, mu, init_iteration, max_iteration, tol):
self.A = A
self.AT = self.A.T
self.b = b
self.m, self.n = self.A.shape
self.mu = mu
self.init_iteration = init_iteration
self.max_iteration = max_iteration
self.tol = tol
self.AAT = np.dot(self.A, self.AT)
self.ATb = np.dot(self.A.T, self.b)
self.cov = np.dot(self.AT, self.A)
self.step_size = 1.0/np.linalg.norm(self.cov, 2)
        self.coef = np.linalg.inv(np.eye(self.m) + 1.0*self.AAT)
self.result_path = []
    def loss(self, x):
        x = x.reshape(-1)
        return 0.5 * np.sum(np.square(np.dot(self.A, x) - self.b)) + self.mu * np.sum(np.abs(x))
def train(self, method="dual"):
import time
start_time = time.time()
print method + ' is Solving...'
if method == "dual":
# initial weights
self.y = np.random.normal(size=(self.m))
self.z = np.dot(self.AT, self.y)
self.x = np.zeros(self.n)
def proj_inf_norm(z, uu):
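                # project z onto the infinity-norm ball of radius uu,
                # i.e. clip every component to the interval [-uu, uu]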
v = 1.0*z[:]
v[z>=uu] = 1.0*uu
v[z<=-uu] = -1.0*uu
return v
def update(y, z, w, uu, t):
z = np.dot(self.AT, y) + w/t
z = proj_inf_norm(z, uu)
y = np.dot(self.coef, self.b + t*np.dot(self.A, z - w/t))
w = w + t*(np.dot(self.AT, y) - z)
return y, z, w
self.iters = 1
self.err_rate = 1.0
new_max_iteration = self.max_iteration + 6 * self.init_iteration
while(self.err_rate > self.tol and self.iters < new_max_iteration):
self.result_path.append(self.loss(self.x))
x_ = self.x
self.y, self.z, self.x = update(self.y, self.z, self.x, self.mu, t=1.0)
self.err_rate = np.abs(self.loss(self.x)-self.loss(x_))/self.loss(x_)
self.iters += 1
elif method == "dal":
# initial weights
self.y = np.random.normal(size=(self.m))
self.z = np.dot(self.AT, self.y)
self.x = np.zeros(self.n)
def proj_inf_norm(z, uu):
v = 1.0*z[:]
v[z>=uu] = 1.0*uu
v[z<=-uu] = -1.0*uu
return v
def update(y, z, w, uu, t):
for i in range(2):
z = np.dot(self.AT, y) + w/t
z = proj_inf_norm(z, uu)
y = np.dot(self.coef, self.b + t*np.dot(self.A, z - w/t))
w = w + t*(np.dot(self.AT, y) - z)
return y, z, w
self.iters = 1
self.err_rate = 1.0
new_max_iteration = self.max_iteration + 6 * self.init_iteration
while(self.err_rate > self.tol and self.iters < new_max_iteration):
self.result_path.append(self.loss(self.x))
x_ = self.x
self.y, self.z, self.x = update(self.y, self.z, self.x, self.mu, t=1.0)
self.err_rate = np.abs(self.loss(self.x)-self.loss(x_))/self.loss(x_)
self.iters += 1
elif method == "linear":
# initial weights
self.x = np.random.normal(size=(self.n))
self.y = np.dot(self.A, self.x)
self.z = np.zeros(self.m)
def soft_thresholding(x, h):
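                # elementwise soft-thresholding, sign(x) * max(|x| - h, 0),
                # i.e. the proximal operator of h * ||x||_1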
y = 1.0*x[:]
y[x>=h] = 1.0*(y[x>=h] - h)
y[x<=-h] = 1.0*(y[x<=-h] + h)
y[np.abs(x)<=h] = 0.0
return y
def update(x, y, z, u, t):
grad = t*np.dot(self.cov, x) - t*np.dot(self.AT, self.b + y - z/t)
x = soft_thresholding(x - self.step_size * grad, self.step_size * u)
y = (t*np.dot(self.A, x) + z - t*self.b)/(1.0 + t)
                z = z + t*(np.dot(self.A, self.x) - self.b - y)
return x, y, z
for hot_mu in [1e3, 1e2, 1e1, 1e-1, 1e-2, 1e-3]:
for k in range(self.init_iteration):
self.x, self.y, self.z = update(self.x, self.y, self.z, hot_mu, t=1.0)
self.result_path.append(self.loss(self.x))
self.iters = 1
self.err_rate = 1.0
while(self.err_rate > self.tol and self.iters < self.max_iteration):
self.result_path.append(self.loss(self.x))
x_ = self.x
self.x, self.y, self.z = update(self.x, self.y, self.z, hot_mu, t=1.0)
self.err_rate = np.abs(self.loss(self.x)-self.loss(x_))/self.loss(x_)
self.iters += 1
else:
print "Such method is not yet supported!!"
self.run_time = time.time() - start_time
print 'End!'
def plot(self, method='dual'):
from bokeh.plotting import figure, output_file, show
x = range(len(self.result_path))
y = self.result_path
output_file("./admm_"+method+".html")
p = figure(title="ADMM Method_"+method, x_axis_label='iteration', y_axis_label='loss')
p.line(x, y, legend=method, line_width=2)
show(p)
if __name__ == '__main__':
import numpy as np
from bokeh.plotting import figure, output_file, show
# for reproducibility
np.random.seed(1337)
n = 1024
m = 512
mu = 1e-3
init_iteration = int(1e3)
max_iteration = int(1e3)
tol = 1e-9
# Generating test matrices
A = np.random.normal(size=(m, n))
u = np.random.normal(size=(n)) * np.random.binomial(1, 0.1, (n))
b = np.dot(A, u).reshape(-1)
result_time = []
result_mse = []
output_file("./ADMM.html")
p = figure(title="ADMM Method", x_axis_label='iteration', y_axis_label='loss')
for method, color in zip(["dual", "dal", "linear"],["orange", "red", "blue"]):
model = ADMM_method(A, b, mu, init_iteration, max_iteration, tol)
model.train(method)
result_time.append(model.run_time)
result_mse.append(np.mean(np.square(model.x-u)))
x = range(len(model.result_path))
y = model.result_path
p.line(x, y, legend=method, line_width=2, line_color=color)
show(p)
| gpl-3.0 | -2,735,227,852,902,892,500 | 29.319527 | 88 | 0.595354 | false |
GillesArcas/numsed | numsed/sedcode.py | 1 | 27872 | """
Generation of sed code for numsed.
"""
from __future__ import print_function
import re
import subprocess
try:
import common
import opcoder
except ImportError:
from . import common
from . import opcoder
class SedConversion(common.NumsedConversion):
def __init__(self, source, transformation):
common.NumsedConversion.__init__(self, source, transformation)
x = opcoder.OpcodeConversion(source, transformation)
opcodes = x.trace().splitlines()
self.sed = make_sed_header(source) + sedcode(opcodes)
def trace(self):
return self.sed
def run(self, verbose=True):
return run_sed(self.sed, verbose)
HEADER1 = '''\
# This sed script is the result of the compilation of the following python script by numsed.py
# https://github.com/GillesArcas/numsed
%s
'''
HEADER2 = '''\
# This sed script is generated by numsed.py
# https://github.com/GillesArcas/numsed
'''
def make_sed_header(source):
if common.hasextension(source, '.py'):
with open(source) as f:
python = ''.join(['# ' + x for x in f.readlines()])
return HEADER1 % python
elif common.hasextension(source, '.opc'):
return HEADER2
else:
return ''
def run_sed(sed, verbose=True):
# save sed script
with open(common.TMP_SED, 'w') as f:
print(sed, file=f)
# save minimal input file
with open(common.TMP_INPUT, 'w') as f:
print('0', file=f)
com = 'sed -u -n -r -f %s %s' % (common.TMP_SED, common.TMP_INPUT)
if 0:
# does not work with travis
res = subprocess.check_output(com).decode('ascii')
else:
res = common.run(com, echo=verbose)
return res
# -- Generate sed code -------------------------------------------------------
def sedcode(opcode):
global function_labels, return_labels
function_labels = ['print.func']
return_labels = []
for instr in opcode:
if opcoder.is_function_label(instr):
function_labels.append(instr[1:].strip())
sedcode = normalize('\n'.join(opcode))
return_labels += ['end_of_script']
sedcode += '\n:call_function\n' + BRANCH_ON_NAME(function_labels)
sedcode += '\n:return\n' + BRANCH_ON_NAME(return_labels)
sedcode = prettyprint(sedcode)
return sedcode
def normalize(snippet):
r"""
Replace opcodes with sed instructions.
Each opcode is replaced with a sed snippet. Two conventions help to write
the snippets:
- identifiers beginning with a dot are labels and are replaced with sed
labels avoiding conflicts if the opcode is used several times.
- \d, which does not exist in sed, is replaced with [0-9]
"""
labels = []
for line in snippet.splitlines():
m = re.match(r' *:(\.\S+)', line)
if m:
labels.append(m.group(1))
for label in labels:
snippet = snippet.replace(label, new_label())
macros = opcoder.OPCODES
macros += ('PUSH', 'POP', 'POP2', 'SWAP', 'POP_TOP', 'DUP_TOP',
'CHECKINT2', 'CMP', 'EQU', 'NEQ', 'UADD', 'USUB', 'UMUL',
'FULLADD', 'FULLSUB', 'FULLMUL', 'MULBYDIGIT', 'DIVBY2', 'ODD')
for macro in macros:
func = globals()[macro]
def repl(m):
arg = '' if not m.group(1) else m.group(1)
larg = [] if not arg else [arg]
return '# %s %s\n' % (macro, arg) + normalize(func(*larg)) + ('# %s/\n' % macro)
snippet = re.sub(r'(?<!# )\b%s\b *([^#\n]*)' % macro, repl, snippet)
snippet = snippet.replace('\\d', '[0-9]')
return snippet
label_counter = 0
def new_label():
global label_counter
r = 'L%d' % label_counter
label_counter += 1
return r
return_counter = 0
def new_return():
global return_counter
r = 'R%d' % return_counter
return_counter += 1
return r
def prettyprint(sedcode):
sedcode2 = []
for instr in sedcode.splitlines():
instr = instr.strip()
if instr.startswith(':'):
pass
else:
instr = ' ' + instr
m = re.match('^([^#]*)(#.*)', instr)
if m:
instr = '%-40s%s' % (m.group(1).rstrip(), m.group(2))
sedcode2.append(instr)
return '\n'.join(sedcode2)
# -- Startup -----------------------------------------------------------------
def STARTUP():
snippet = r'''
x
s/.*/end_of_script;@/
x
b.start
:end_of_script
q
:NameError
s/.*/NameError: name & is not defined/
p
q
:UnknownLabel
s/.*/UnknownLabel: label & is not defined/
p
q
:NotPositiveInteger
s/^([^;]+;[^;]+).*/NotPositiveInteger: an operand is not a positive integer: \1/
p
q
:NotImplemented
s/.*/NotImplemented: not available with --literal, use --unsigned or --signed: &/
p
q
:.start
'''
return snippet
# -- Stack -------------------------------------------------------------------
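# In the snippets below PS is the sed pattern space and HS the hold space.
# The hold space carries the whole machine state: the stack items first,
# separated by ";" with the top of stack leftmost, then "@" followed by the
# global ";name;value" pairs, then one "|"-delimited segment of local
# ";name;value" pairs per active call, e.g. "3;42;end_of_script;@;x;7|;n;3"
# (layout inferred from STARTUP, STORE_GLOBAL, MAKE_CONTEXT and STORE_FAST).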
def PUSH():
snippet = r''' # PS: N HS: X
G # PS: N\nX HS: X
s/\n/;/ # PS: N;X HS: X
h # PS: N;X HS: N;X
s/;.*// # PS: N HS: N;X
'''
return snippet
def POP():
snippet = r''' # PS: ? HS: N;X
g # PS: N;X HS: N;X
s/^[^;]*;// # PS: X HS: N;X
x # PS: N;X HS: X
s/;.*// # PS: N HS: X
'''
return snippet
def PUSH2():
snippet = r''' # PS: M;N HS: X
G # PS: M;N\nX HS: X
s/\n/;/ # PS: M;N;X HS: X
h # PS: M;N;X HS: M;N;X
s/^([^;]*;[^;]*);.*/\1/ # PS: M;N HS: M;N;X
'''
return snippet
def POP2():
snippet = r''' # PS: ? HS: M;N;X
g # PS: M;N;X HS: M;N;X
s/^[^;]*;[^;]*;// # PS: X HS: M;N;X
x # PS: M;N;X HS: X
s/(^[^;]*;[^;]*).*/\1/ # PS: M;N HS: X
'''
return snippet
def SWAP():
snippet = r''' # PS: ? HS: M;N;X
x # PS: M;N;X HS: ?
s/^([^;]*;)([^;]*;)/\2\1/ # PS: N;M;X HS: ?
x # PS: ? HS: N;M;X
'''
return snippet
def POP_TOP():
snippet = r'''
g
s/^[^;]+;//
h
'''
return snippet
def DUP_TOP():
snippet = r'''
g
s/^([^;]+;)/\1\1/
h
'''
return snippet
def ROT_TWO():
snippet = r''' # PS: ? HS: M;N;X
g # PS: M;N;X HS: ?
s/^([^;]*;)([^;]*;)/\2\1/ # PS: N;M;X HS: ?
h # PS: ? HS: N;M;X
'''
return snippet
def ROT_THREE():
snippet = r''' # PS: ? HS: M;N;P;X
g # PS: M;N;P;X HS: ?
s/^([^;]*;)([^;]*;)([^;]*;)/\2\3\1/
# PS: N;P;M;X HS: ?
h # PS: ? HS: N;P;M;X
'''
return snippet
def LOAD_CONST(const):
const = re.sub(r'^([\'"])(.*)\1$', r'\2', const) # remove quotes
snippet = r''' # PS: ? HS: X
g # PS: X HS: X
s/^/const;/ # PS: const;X HS: X
h # PS: const;X HS: const;X
'''
return snippet.replace('const', const)
# -- Tuples ------------------------------------------------------------------
def BUILD_TUPLE(n):
n = int(n)
lhs = '([^;]+);' * n
rhs = ','.join(r'\%d' % _ for _ in range(n, 0, -1))
snippet = r'''
g
s/lhs/rhs;/
h
'''
return snippet.replace('lhs', lhs).replace('rhs', rhs)
def UNPACK_SEQUENCE(n):
n = int(n)
lhs = '([^,]+),' * (n - 1) + '([^,]+)'
rhs = ';'.join(r'\%d' % _ for _ in range(1, n + 1))
snippet = r'''
g
s/lhs/rhs/
h
'''
return snippet.replace('lhs', lhs).replace('rhs', rhs)
# -- Name spaces -------------------------------------------------------------
def MAKE_CONTEXT():
snippet = '''
x
s/$/|/
x
'''
return snippet
def POP_CONTEXT():
snippet = '''
x
s/[|][^|]*$//
x
'''
return snippet
def LOAD_GLOBAL(name):
"""
TOS = val(name)
"""
snippet = r''' # PS: ? HS: ?;v;x?
g # PS: ?;v;x? HS: ?;v;x?
/@[^|]*;name;/! { s/.*/name/; b NameError }
# branch to error if var undefined
s/[^@]*@[^|]*;name;([^;|]*).*/\1;&/
# PS: x;?;v;x? HS: ?;v;x?
h # PS: x;?;v;x? HS: x;?;v;x?
'''
return snippet.replace('name', name)
def STORE_GLOBAL(name):
"""
name = POP()
"""
snippet = r''' # PS: ? HS: x;X
g
s/(@[^|]*);name;[^;|]*/\1/ # PS: x;X' HS: ? (del ;var;val in PS)
s/^([^;]*);([^@]*@)/\2;name;\1/ # PS: X;v;x HS: ?
h # PS: ? HS: X;v;x
'''
return snippet.replace('name', name)
STORE_NAME = STORE_GLOBAL
LOAD_NAME = LOAD_GLOBAL
def LOAD_FAST(name):
"""
TOS = val(name)
"""
snippet = r''' # PS: ? HS: ?;v;x?
g # PS: ?;v;x? HS: ?;v;x?
t.reset # reset t flag
:.reset
s/.*;name;([^;]*)[^|]*$/\1;&/ # PS: x;?;v;x? HS: ?;v;x?
t.next
s/.*/name/; b NameError # branch to error if var undefined
:.next
h # PS: ? HS: x;?;v;x?
'''
return snippet.replace('name', name)
def STORE_FAST(name):
"""
name = POP()
"""
snippet = r''' # PS: ? HS: x;X
g # PS: x;X HS: ?
s/;name;[^;|]*([^|]*)$/\1/ # PS: x;X' HS: ? (del ;var;val in PS)
s/^([^;]*);(.*)/\2;name;\1/ # PS: X';v;x HS: ?
h # PS: ? HS: X';v;x
'''
return snippet.replace('name', name)
# -- Functions ---------------------------------------------------------------
def MAKE_FUNCTION(x):
return ''
def CALL_FUNCTION(argc):
if int(argc) >= 256:
raise Exception('numsed: keyword parameters not handled (argc: %s)' % argc)
return_label = new_return()
return_labels.append(return_label)
nargs = '~' * int(argc) # number of arguments unary encoded
# argc parameters on top of stack above name of function
# add return label and swap parameters and name
snippet = r'''
g
s/^(([^;]*;){argc})([^;]+;)/\3\1return_label;/
s/^print.func;/print.func;nargs;/
h
POP
b call_function
:return_label
'''
return snippet.replace('argc', argc).replace('return_label', return_label).replace('nargs', nargs)
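# Example of the rewrite above for "CALL_FUNCTION 2": a hold space
# "b;a;myfunc;X" becomes "myfunc;b;a;Rk;X" (Rk being the generated return
# label); POP then leaves "myfunc" in the pattern space and call_function
# dispatches on that name, while the arguments and the return label stay on
# the stack for the callee.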
def RETURN_VALUE():
snippet = r''' # PS: ? HS: R;label;X
SWAP # PS: ? HS: label;R;X
POP # PS: label HS: R;X
b return
'''
return snippet
def BRANCH_ON_NAME(labels):
snippet = r''' # PS: label
t.test_return # t to next line to reset t flag
:.test_return # PS: label
'''
snippet = snippet.replace('test_return', new_label())
snippet += '\n'.join(('s/^%s$//;t %s' % (label, label) for label in labels))
snippet += '\nb UnknownLabel'
return snippet
# -- Control flow-------------------------------------------------------------
def POP_JUMP_IF_TRUE(target):
snippet = '''
POP
/^0$/!b target
'''
return snippet.replace('target', target)
def POP_JUMP_IF_FALSE(target):
snippet = '''
POP
/^0$/b target
'''
return snippet.replace('target', target)
def JUMP_IF_TRUE_OR_POP(target):
snippet = '''
g
/^0;/!b target
POP
'''
return snippet.replace('target', target)
def JUMP_IF_FALSE_OR_POP(target):
snippet = '''
g
/^0;/b target
POP
'''
return snippet.replace('target', target)
def JUMP(target):
return 'b ' + target
def SETUP_LOOP(_):
return ''
def POP_BLOCK():
return ''
def EXIT():
snippet = '''
q
'''
return snippet
# -- Type checking -----------------------------------------------------------
def CHECKINT2():
snippet = r''' # PS: X;Y HS: X;Y;Z
/^\d+;\d+/!b NotPositiveInteger
'''
return snippet
# -- Boolean operations ------------------------------------------------------
# Only UNARY_NOT is implemented. BINARY_AND and BINARY_OR, which implement
# the bitwise operators (& and |), are not implemented. Logical 'and' and
# 'or' are compiled with tests and jumps.
def UNARY_NOT():
snippet = r'''
g
s/^0;/!;/ # use marker to avoid another substitution
s/^\d+/0/
s/^!/1/
h
'''
return snippet
# -- Compare operators -------------------------------------------------------
def EQU():
snippet = r'''
POP2 # PS: X;Y
s/^([^;]+);\1$/1/ # PS: 1 if equal
s/^[^;]+;[^;]+$/0/ # PS: 0 if different
PUSH
'''
return snippet
def NEQ():
snippet = r'''
POP2 # PS: X;Y
s/^([^;]+);\1$/0/ # PS: 0 if equal
s/^[^;]+;[^;]+$/1/ # PS: 1 if different
PUSH
'''
return snippet
def CMP():
snippet = r''' # PS: X;Y;
s/;/!;/g # PS: X!;Y!;
:.loop # PS: Xx!X';Yy!Y';
s/(\d)!(\d*;\d*)(\d)!/!\1\2!\3/ # PS: X!xX';Y!yY';
t.loop
/^!/!b.gt
/;!/!b.lt
# PS: !X;!Y;
s/^!(\d*)(\d*);!\1(\d*);/\2;\3;/# strip identical leading digits
/^;;$/ { s/.*/=/; b.end } # PS: = if all digits are equal
s/$/9876543210/
/^(.)\d*;(.)\d*;.*\1.*\2/b.gt
:.lt
s/.*/</ # PS: < if x < y
b.end
:.gt
s/.*/>/ # PS: > if x > y
:.end # PS: <|=|>
'''
return snippet
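# A minimal Python sketch (for reference, not used) of what the CMP snippet
# computes, assuming x and y are unsigned decimal strings without signs.
def CMP_python(x, y):
    if int(x) < int(y):
        return '<'
    elif int(x) > int(y):
        return '>'
    return '='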
def COMPARE_OP(opname):
if opname == '==':
return 'EQU'
if opname == '!=':
return 'NEQ'
snippet = '''
SWAP
POP2
CHECKINT2
s/$/;/
CMP
y/<=>/xyz/
PUSH
'''
conv = {'==': '010', '!=': '101', '<': '100', '<=': '110', '>': '001', '>=': '011'}
return snippet.replace('xyz', conv[opname])
# - Addition and subtraction -------------------------------------------------
def HALFADD(): # for reference, not used
snippet = r'''
s/^(..)/&;9876543210;9876543210;/
s/(.)(.);\d*\1(\d*);\d*(\2\d*);/\3\49876543210;/
s/.{10}(.)\d{0,9}(\d{0,1})\d*;/0\2\1;/
/^0\d(\d);/s//1\1;/
s/;//
'''
return snippet
def FULLADD():
"""
Add two left digits with carry
Input PS: abcX with c = 0 or 1
Output PS: rX with r = a + b + c padded on two digits
"""
snippet = r'''
s/^(...)/\1;9876543210;9876543210;/
s/^(..)0/\1/
s/(.)(.)(\d)*;(\d*\1(\d*));\d*(\2\d*);/\3\5\6\4;/
s/.{10}(.)\d{0,9}(\d{0,1})\d*;/0\2\1;/
/^0\d(\d);/s//1\1;/
s/;//
'''
return snippet
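# For reference, not used: a Python sketch of the digit adder above,
# e.g. FULLADD_python(7, 8, 1) == '16' and FULLADD_python(2, 3, 0) == '05'.
def FULLADD_python(a, b, c):
    return '%02d' % (a + b + c)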
def FULLSUB():
"""
Subtract two left digits with borrow
Input PS: abcX with c = 0 or 1
Output PS: xyX with if b+c <= a, x = 0, y = a-(b+c)
if b+c > a, x = 1, y = 10+a-(b+c)
"""
snippet = r'''
s/^(...)/\1;9876543210;0123456789;/
s/^(..)0/\1/
s/(.)(.)(\d*);\d*\2(\d*);(\d*(\1\d*));/\3\4\6\5;/
s/.{10}(.)\d{0,9}(\d{0,1})\d*;/0\2\1;/
/^0\d(\d);/s//1\1;/
s/;//
'''
return snippet
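# For reference, not used: a Python sketch of the digit subtractor above,
# e.g. FULLSUB_python(3, 5, 0) == '18' (borrow 1, digit 10+3-5 = 8).
def FULLSUB_python(a, b, c):
    d = a - (b + c)
    return '0%d' % d if d >= 0 else '1%d' % (10 + d)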
def UADD():
snippet = r'''
# PS: M;N*
s/\d*;\d*/0;&;/ # PS; 0;M;N;*
:.loop # PS: cR;Mm;Nn;*
s/^(\d*);(\d*)(\d);(\d*)(\d)/\3\5\1;\2;\4/
# PS: mncR;M;N;*
FULLADD # PS: abR;M;N;*
/^\d*;\d*\d;\d/b.loop # more digits in M and N
/^\d*;;;/{ # no more digits in M and N
s/;;;//
s/^0//
b.exit
}
/^1/{
s/;;/;0;/
b.loop
}
s/^0(\d*);(\d*);(\d*);/\2\3\1/
:.exit # PS: R*
'''
return snippet
def USUB():
snippet = r'''
# PS: M;N*
s/\d*;\d*/0;&;/ # PS; 0;M;N;*
:.loop # PS: cR;Mm;Nn;*
s/(\d*);(\d*)(\d);(\d*)(\d);/\3\5\1;\2;\4;/
# PS: mncR;M;N;*
FULLSUB # PS: c'rR;M;N;*
/^\d*;\d*\d;\d/ b.loop # more digits in M and N
/^\d*;;\d/b.nan # more digits in N
/^1\d*;;;/b.nan # same number of digits, but borrow
/^1/{ # if borrow,
s/^1(\d*;\d*);;/0\1;1;/ # move borrow to second operand
b.loop # and loop
}
s/^0(\d*);(\d*);;/\2\1/ # add remaining part of first operand
s/^0*(\d)/\1/ # del leading 0
b.end
:.nan # if invalid subtraction
s/^\d*;\d*;\d*;/NAN/ # PS: NAN*
:.end # PS: M-N|NAN
'''
return snippet
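# For reference, not used: the UADD/USUB snippets implement unsigned
# arithmetic on decimal strings; in Python terms, roughly:
def UADD_python(m, n):
    return str(m + n)
def USUB_python(m, n):
    return str(m - n) if m >= n else 'NAN'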
def BINARY_ADD():
"""
Implements TOS = TOS1 + TOS on unsigned integers (R = N + M).
"""
snippet = r''' # PS: ? HS: M;N;X
POP2 # PS: M;N HS: X
CHECKINT2
UADD # PS: R HS: X
PUSH # PS: R HS: R;X
'''
return snippet
def BINARY_SUBTRACT():
"""
Implements TOS = TOS1 - TOS on unsigned integers (R = N - M).
"""
snippet = r''' # PS: ? HS: M;N;X
SWAP
POP2 # PS: M;N HS: X
CHECKINT2
USUB # PS: R HS: X
PUSH # PS: R HS: R;X
'''
return snippet
def UNARY_POSITIVE():
"""
Implements TOS = +TOS.
"""
return ''
def UNARY_NEGATIVE():
"""
Implements TOS = -TOS.
"""
snippet = r''' # PS: ? HS: N;X
g # PS: N;X HS: N;X
s/^-/!/ # use marker to avoid another substitution
s/^\+/-/ #
s/^[0-9]/-&/ #
s/^-0;/0;/ # handle N = -0
s/^!// # remove marker
h # PS: R;X HS: R;X R = -N
'''
return snippet
# -- Multiplication ----------------------------------------------------------
def FULLMUL():
"""
Multiply two digits with carry (dc.sed version)
Input PS: abcX with a, b and c = 0 to 9
Output PS: rX with r = a * b + c padded on two digits
"""
snippet = r'''
/^(0.|.0)/ {
s/^../0/
b.exit
}
s/(...)/\1;9876543210aaaaaaaaa;9876543210aaaaaaaaa;/
s/(.)(.)(.);\d*\2.{9}(a*);\d*\3.{9}(a*);/\19\48\47\46\45\44\43\42\41\40\5;/
s/(.)[^;]*\1(.*);/\2;/
s/a\d/a/g
s/a{10}/b/g
s/(b*)(a*)/\19876543210;\29876543210/
s/.{9}(.)\d*;.{9}(.)\d*;/\1\2/
:.exit
'''
return snippet
def MULBYDIGIT():
"""
Multiply an integer by a digit
Input PS: aN;X with a = 0 to 9
Output PS: R;X
"""
snippet = r''' # PS: aNX
s/(.)(\d*)/0;\1;\2;/
:.loop
s/(\d*);(\d);(\d*)(\d)/\2\4\1;\2;\3/
FULLMUL
/^\d*;\d;\d/b.loop
s/;\d;;// # PS: RX
s/^0*(\d)/\1/
'''
return snippet
def UMUL_python(a, b): # for reference, not used
r = 0
m = 1
while b > 0:
digit = b % 10
b = b / 10
r += m * digit * a
m *= 10
return r
def UMUL():
"""
Multiply two integers
"""
snippet = r''' # PS: A;M;
s/^/0;;/ # PS: 0;;A;M;
:.loop # PS: P;S;A;Mm;
# P partial result to add, S last digits
s/(\d*;\d*;(\d*;)\d*)(\d)/\3\2\1/
# PS: mA;P;S;A;M;
MULBYDIGIT # PS: B;P;S;A;M; (B = m * A)
UADD # PS: R;S;A;M (R = B + P)
# PS: Rr;S;A;M;
s/(\d);/;\1/ # PS: R;rS;A;M;
s/^;/0;/ # R is the partial result to add, if empty put 0
/\d; *$/b.loop # Loop if still digits in M
# PS: R;S;A;;
s/(\d*);(\d*).*/\1\2/ # PS: RS
s/^0*(.)/\1/ # Normalize leading zeros
'''
return snippet
def BINARY_MULTIPLY():
"""
Implements TOS = TOS1 * TOS on unsigned integers (R = N * M).
"""
snippet = r''' # PS: ? HS: M;N;X
POP2 # PS: M;N HS: X
CHECKINT2
s/$/;/
UMUL # PS: R HS: X
PUSH # PS: R HS: R;X
'''
return snippet
def BINARY_FLOOR_DIVIDE():
# not implemented in sed, implemented in python
return '''
s/.*/div/
b NotImplemented
'''
def BINARY_MODULO():
# not implemented in sed, implemented in python
return '''
s/.*/mod/
b NotImplemented
'''
def BINARY_POWER():
# not implemented in sed, implemented in python
return '''
s/.*/power/
b NotImplemented
'''
# -- Helper opcodes ----------------------------------------------------------
def IS_POSITIVE():
snippet = r''' # PS: ? HS: N;X
g # PS: N;X HS: N;X
s/^[0-9+][^;]*/1/ # PS: 1;X HS: N;X if pos
s/^-[^;]+/0/ # PS: 0;X HS: N;X if neg
h # PS: r;X HS: r;X r = 0 or 1
'''
return snippet
def ABS():
snippet = r''' # PS: ? HS: N;X
g # PS: N;X HS: N;X
s/^-// # remove minus sign
h # PS: R;X HS: R;X R = abs(N)
'''
return snippet
def DIVBY2():
snippet = r''' # PS: N;X
s/^[0-9]+;/0;&;/ # PS: 0;N;;X
:.loop
/^.;;/b.end
# PS: c;nN;R;X
s/;(.)/\1;/ # PS: cn;N;R;X
s/(..)/\1!0000!0110!0201!0311!0402!0512!0603!0713!0804!0914!1005!1115!1206!1316!1407!1517!1608!1718!1809!1919/
# PS: cn!LUT;N;R;X
s/(..).*!\1(.)(.)[^;]*;([^;]*);([^;]*);/\2;\4;\5\3;/
# PS: r;N;Rq;X
b.loop
:.end # PS: c;;R;X
s/.;;0?(\d)/\1/ # PS: R;X R = N // 2
'''
return snippet
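# For reference, not used: a Python sketch of the halving loop above.  The
# two-digit lookup table encodes, for a carry and a digit, the next carry
# and the next quotient digit of N // 2.
def DIVBY2_python(n):
    carry, result = 0, ''
    for digit in str(n):
        value = carry * 10 + int(digit)
        result += str(value // 2)
        carry = value % 2
    return str(int(result))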
def DIVIDE_BY_TWO():
snippet = r''' # PS: ? HS: N;X
g # PS: N;X HS: N;X
DIVBY2 # PS: R;X HS: N;X
h # PS: R;X HS: R;X R = N // 2
'''
return snippet
def ODD():
snippet = r''' # PS: N;X
s/^\d*(\d)/\1!00!11!20!31!40!51!60!71!80!91/
s/^(.).*!\1(.)[^;]*/\2/ # PS: R;X R = 0 if even, or 1 if odd
'''
return snippet
def IS_ODD():
snippet = r''' # PS: ? HS: N;X
g # PS: N;X HS: N;X
ODD
h # PS: R;X HS: R;X R = 0 if even, or 1 if odd
'''
return snippet
def DIVIDE_BY_TEN():
snippet = r''' # PS: ? HS: N;X
g # PS: N;X HS: N;X
s/\d;/;/ # remove last digit
s/^;/0;/ # R = 0 if single digit input
h # PS: R;X HS: R;X R = N // 10
'''
return snippet
def MODULO_TEN():
snippet = r''' # PS: ? HS: N;X
g # PS: N;X HS: N;X
s/\d*(\d);/\1;/ # keep last digit
h # PS: R;X HS: R;X R = N % 10
'''
return snippet
def DIVMOD10():
snippet = r''' # PS: ? HS: N;X
g # PS: N;X HS: N;X
s/(\d);/,\1;/ # PS: Q;R;X HS: N;X
s/^,/0,/ # Q = 0 if single digit input
h # PS: Q;R;X HS: Q;R;X Q,R = divmod(N, 10)
'''
return snippet
# -- Printing ----------------------------------------------------------------
def PRINT_ITEM():
snippet = r''' # PS: ? HS: N;X
POP # PS: N HS: X
p
'''
return snippet
def PRINT_NEWLINE():
return ''
def PRINT_ITEMS():
snippet = r''' # PS: ? HS: ~~~;C;B;A;X
g
:.loop # PS: C ~~;B;A;X
s/([^~]*)~(~*);([^;]*);/\3 \1\2;/
# PS: A B ~;C;X
t.loop
s/ ;/;/ # remove extra space
# PS: A B C ;X
h # PS: A B C ;X HS: A B C ;X
POP # PS: A B C HS: X
p
'''
return snippet
# -- Debug -------------------------------------------------------------------
def TRACE(msg):
snippet = '''
i msg
p
x
p
x
'''
# return ''
return snippet.replace('msg', msg)
| mit | 4,287,439,202,949,444,600 | 26.487179 | 118 | 0.355446 | false |
andyliuliming/azure-linux-extensions | OmsAgent/omsagent.py | 1 | 63449 | #!/usr/bin/env python
#
# OmsAgentForLinux Extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path
import re
import sys
import traceback
import time
import platform
import subprocess
import json
import base64
import inspect
import urllib
import urllib2
try:
from Utils.WAAgentUtil import waagent
import Utils.HandlerUtil as HUtil
except Exception as e:
# These utils have checks around the use of them; this is not an exit case
print('Importing utils failed with error: {0}'.format(e))
# Global Variables
PackagesDirectory = 'packages'
BundleFileName = 'omsagent-1.4.4-210.universal.x64.sh'
GUIDRegex = r'[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'
GUIDOnlyRegex = r'^' + GUIDRegex + '$'
SCOMCertIssuerRegex = r'^[\s]*Issuer:[\s]*CN=SCX-Certificate/title=SCX' + GUIDRegex + ', DC=.*$'
SCOMPort = 1270
PostOnboardingSleepSeconds = 5
InitialRetrySleepSeconds = 30
IsUpgrade = False
# Paths
OMSAdminPath = '/opt/microsoft/omsagent/bin/omsadmin.sh'
OMSAgentServiceScript = '/opt/microsoft/omsagent/bin/service_control'
OMIConfigEditorPath = '/opt/omi/bin/omiconfigeditor'
OMIServerConfPath = '/etc/opt/omi/conf/omiserver.conf'
EtcOMSAgentPath = '/etc/opt/microsoft/omsagent/'
VarOMSAgentPath = '/var/opt/microsoft/omsagent/'
SCOMCertPath = '/etc/opt/microsoft/scx/ssl/scx.pem'
# Commands
# Always use upgrade - will handle install if scx, omi are not installed or
# upgrade if they are
InstallCommandTemplate = '{0} --upgrade'
UninstallCommandTemplate = '{0} --remove'
WorkspaceCheckCommand = '{0} -l'.format(OMSAdminPath)
OnboardCommandWithOptionalParams = '{0} -w {1} -s {2} {3}'
RestartOMSAgentServiceCommand = '{0} restart'.format(OMSAgentServiceScript)
DisableOMSAgentServiceCommand = '{0} disable'.format(OMSAgentServiceScript)
# Error codes
DPKGLockedErrorCode = 12
InstallErrorCurlNotInstalled = 64
EnableErrorOMSReturned403 = 5
EnableErrorOMSReturnedNon200 = 6
EnableErrorResolvingHost = 7
EnableErrorOnboarding = 8
EnableCalledBeforeSuccessfulInstall = 9
UnsupportedOpenSSL = 60
# OneClick error codes
OneClickErrorCode = 40
ManagedIdentityExtMissingErrorCode = 41
ManagedIdentityExtErrorCode = 42
MetadataAPIErrorCode = 43
OMSServiceOneClickErrorCode = 44
MissingorInvalidParameterErrorCode = 11
UnwantedMultipleConnectionsErrorCode = 10
CannotConnectToOMSErrorCode = 55
# Configuration
HUtilObject = None
SettingsSequenceNumber = None
HandlerEnvironment = None
SettingsDict = None
# OneClick Constants
ManagedIdentityExtListeningURLPath = '/var/lib/waagent/ManagedIdentity-Settings'
GUIDRegex = '[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'
OAuthTokenResource = 'https://management.core.windows.net/'
OMSServiceValidationEndpoint = 'https://global.oms.opinsights.azure.com/ManagedIdentityService.svc/Validate'
AutoManagedWorkspaceCreationSleepSeconds = 20
# vmResourceId Metadata Service
VMResourceIDMetadataHost = '169.254.169.254'
VMResourceIDMetadataEndpoint = 'http://{0}/metadata/instance?api-version=2017-08-01'.format(VMResourceIDMetadataHost)
# Change permission of log path - if we fail, that is not an exit case
try:
ext_log_path = '/var/log/azure/'
if os.path.exists(ext_log_path):
        os.chmod(ext_log_path, 0o700)
except:
pass
def main():
"""
Main method
Parse out operation from argument, invoke the operation, and finish.
"""
init_waagent_logger()
waagent_log_info('OmsAgentForLinux started to handle.')
global IsUpgrade
# Determine the operation being executed
operation = None
try:
option = sys.argv[1]
if re.match('^([-/]*)(disable)', option):
operation = 'Disable'
elif re.match('^([-/]*)(uninstall)', option):
operation = 'Uninstall'
elif re.match('^([-/]*)(install)', option):
operation = 'Install'
elif re.match('^([-/]*)(enable)', option):
operation = 'Enable'
elif re.match('^([-/]*)(update)', option):
operation = 'Update'
IsUpgrade = True
except Exception as e:
waagent_log_error(str(e))
if operation is None:
log_and_exit('Unknown', 1, 'No valid operation provided')
# Set up for exit code and any error messages
exit_code = 0
message = '{0} succeeded'.format(operation)
# Invoke operation
try:
global HUtilObject
HUtilObject = parse_context(operation)
exit_code = operations[operation]()
# Exit code 1 indicates a general problem that doesn't have a more
# specific error code; it often indicates a missing dependency
if exit_code is 1 and operation == 'Install':
message = 'Install failed with exit code 1. Please check that ' \
'dependencies are installed. For details, check logs ' \
'in /var/log/azure/Microsoft.EnterpriseCloud.' \
'Monitoring.OmsAgentForLinux'
elif exit_code is DPKGLockedErrorCode and operation == 'Install':
message = 'Install failed with exit code {0} because the ' \
'package manager on the VM is currently locked: ' \
'please wait and try again'.format(DPKGLockedErrorCode)
elif exit_code is not 0:
message = '{0} failed with exit code {1}'.format(operation,
exit_code)
except OmsAgentForLinuxException as e:
exit_code = e.error_code
message = e.get_error_message(operation)
except Exception as e:
exit_code = 1
message = '{0} failed with error: {1}\n' \
'Stacktrace: {2}'.format(operation, e,
traceback.format_exc())
# Finish up and log messages
log_and_exit(operation, exit_code, message)
def dummy_command():
"""
Do nothing and return 0
"""
return 0
def install():
"""
Ensure that this VM distro and version are supported.
Install the OMSAgent shell bundle, using retries.
Note: install operation times out from WAAgent at 15 minutes, so do not
wait longer.
"""
exit_if_vm_not_supported('Install')
public_settings, protected_settings = get_settings()
if public_settings is None:
raise ParameterMissingException('Public configuration must be ' \
'provided')
workspaceId = public_settings.get('workspaceId')
check_workspace_id(workspaceId)
# In the case where a SCOM connection is already present, we should not
# create conflicts by installing the OMSAgent packages
stopOnMultipleConnections = public_settings.get('stopOnMultipleConnections')
if (stopOnMultipleConnections is not None
and stopOnMultipleConnections is True):
detect_multiple_connections(workspaceId)
package_directory = os.path.join(os.getcwd(), PackagesDirectory)
bundle_path = os.path.join(package_directory, BundleFileName)
os.chmod(bundle_path, 100)
cmd = InstallCommandTemplate.format(bundle_path)
hutil_log_info('Running command "{0}"'.format(cmd))
# Retry, since install can fail due to concurrent package operations
exit_code = run_command_with_retries(cmd, retries = 15,
retry_check = retry_if_dpkg_locked_or_curl_is_not_found,
final_check = final_check_if_dpkg_locked)
return exit_code
def uninstall():
"""
Uninstall the OMSAgent shell bundle.
This is a somewhat soft uninstall. It is not a purge.
Note: uninstall operation times out from WAAgent at 5 minutes
"""
package_directory = os.path.join(os.getcwd(), PackagesDirectory)
bundle_path = os.path.join(package_directory, BundleFileName)
global IsUpgrade
os.chmod(bundle_path, 100)
cmd = UninstallCommandTemplate.format(bundle_path)
hutil_log_info('Running command "{0}"'.format(cmd))
# Retry, since uninstall can fail due to concurrent package operations
exit_code = run_command_with_retries(cmd, retries = 5,
retry_check = retry_if_dpkg_locked_or_curl_is_not_found,
final_check = final_check_if_dpkg_locked)
if IsUpgrade:
IsUpgrade = False
else:
remove_workspace_configuration()
return exit_code
def enable():
"""
Onboard the OMSAgent to the specified OMS workspace.
This includes enabling the OMS process on the VM.
This call will return non-zero or throw an exception if
the settings provided are incomplete or incorrect.
Note: enable operation times out from WAAgent at 5 minutes
"""
exit_if_vm_not_supported('Enable')
public_settings, protected_settings = get_settings()
if public_settings is None:
raise ParameterMissingException('Public configuration must be ' \
'provided')
if protected_settings is None:
raise ParameterMissingException('Private configuration must be ' \
'provided')
vmResourceId = protected_settings.get('vmResourceId')
# If vmResourceId is not provided in private settings, get it from metadata API
if vmResourceId is None or not vmResourceId:
vmResourceId = get_vmresourceid_from_metadata()
hutil_log_info('vmResourceId from Metadata API is {0}'.format(vmResourceId))
if vmResourceId is None:
raise MetadataAPIException('Failed to get vmResourceId from ' \
'Metadata API')
enableAutomaticManagement = public_settings.get('enableAutomaticManagement')
if (enableAutomaticManagement is not None
and enableAutomaticManagement is True):
hutil_log_info('enableAutomaticManagement is set to true; the ' \
'workspace ID and key will be determined by the OMS ' \
'service.')
workspaceInfo = retrieve_managed_workspace(vmResourceId)
if (workspaceInfo is None or 'WorkspaceId' not in workspaceInfo
or 'WorkspaceKey' not in workspaceInfo):
raise OneClickException('Workspace info was not determined')
else:
# Note: do NOT log workspace keys!
hutil_log_info('Managed workspaceInfo has been retrieved')
workspaceId = workspaceInfo['WorkspaceId']
workspaceKey = workspaceInfo['WorkspaceKey']
try:
check_workspace_id_and_key(workspaceId, workspaceKey)
except InvalidParameterError as e:
raise OMSServiceOneClickException('Received invalid ' \
'workspace info: ' \
'{0}'.format(e))
else:
workspaceId = public_settings.get('workspaceId')
workspaceKey = protected_settings.get('workspaceKey')
check_workspace_id_and_key(workspaceId, workspaceKey)
# Check if omsadmin script is available
if not os.path.exists(OMSAdminPath):
log_and_exit('Enable', EnableCalledBeforeSuccessfulInstall,
'OMSAgent onboarding script {0} does not exist. Enable ' \
'cannot be called before install.'.format(OMSAdminPath))
vmResourceIdParam = '-a {0}'.format(vmResourceId)
proxy = protected_settings.get('proxy')
proxyParam = ''
if proxy is not None:
proxyParam = '-p {0}'.format(proxy)
optionalParams = '{0} {1}'.format(proxyParam, vmResourceIdParam)
onboard_cmd = OnboardCommandWithOptionalParams.format(OMSAdminPath,
workspaceId,
workspaceKey,
optionalParams)
hutil_log_info('Handler initiating onboarding.')
exit_code = run_command_with_retries(onboard_cmd, retries = 5,
retry_check = retry_onboarding,
final_check = raise_if_no_internet,
check_error = True, log_cmd = False)
if exit_code is 0:
# Create a marker file to denote the workspace that was
# onboarded using the extension. This will allow supporting
# multi-homing through the extension like Windows does
extension_marker_path = os.path.join(EtcOMSAgentPath, workspaceId,
'conf/.azure_extension_marker')
if os.path.exists(extension_marker_path):
hutil_log_info('Extension marker file {0} already ' \
'created'.format(extension_marker_path))
else:
try:
open(extension_marker_path, 'w').close()
hutil_log_info('Created extension marker file ' \
'{0}'.format(extension_marker_path))
except IOError as e:
hutil_log_error('Error creating {0} with error: ' \
'{1}'.format(extension_marker_path, e))
# Sleep to prevent bombarding the processes, then restart all processes
# to resolve any issues with auto-started processes from --upgrade
time.sleep(PostOnboardingSleepSeconds)
run_command_and_log(RestartOMSAgentServiceCommand)
return exit_code
def remove_workspace_configuration():
"""
    This is needed to distinguish between extension removal and extension upgrade.
    It is a workaround for the waagent upgrade routine calling 'remove' on the old
    version before calling 'upgrade' on the new extension version.
    In the upgrade case, the workspace configuration must persist, while in the
    remove case all of the files must be removed.
    This method removes all files/folders from the workspace paths in Etc and Var.
"""
public_settings, _ = get_settings()
workspaceId = public_settings.get('workspaceId')
etc_remove_path = os.path.join(EtcOMSAgentPath, workspaceId)
var_remove_path = os.path.join(VarOMSAgentPath, workspaceId)
for main_dir in [etc_remove_path, var_remove_path]:
for root, dirs, files in os.walk(main_dir, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(os.path.join(main_dir))
hutil_log_info('Removed Workspace Configuration')
def get_vmresourceid_from_metadata():
req = urllib2.Request(VMResourceIDMetadataEndpoint)
req.add_header('Metadata', 'True')
try:
response = json.loads(urllib2.urlopen(req).read())
return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/virtualMachines/{2}'.format(response['compute']['subscriptionId'],response['compute']['resourceGroupName'],response['compute']['name'])
except urllib2.HTTPError as e:
hutil_log_error('Request to Metadata service URL ' \
'failed with an HTTPError: {0}'.format(e))
hutil_log_info('Response from Metadata service: ' \
'{0}'.format(e.read()))
return None
except:
hutil_log_error('Unexpected error from Metadata service')
return None
def retrieve_managed_workspace(vm_resource_id):
"""
EnableAutomaticManagement has been set to true; the
ManagedIdentity extension and the VM Resource ID are also
required for the OneClick scenario
Using these and the Metadata API, we will call the OMS service
to determine what workspace ID and key to onboard to
"""
# Check for OneClick scenario requirements:
if not os.path.exists(ManagedIdentityExtListeningURLPath):
raise ManagedIdentityExtMissingException
# Determine the Tenant ID using the Metadata API
tenant_id = get_tenant_id_from_metadata_api(vm_resource_id)
# Retrieve an OAuth token using the ManagedIdentity extension
if tenant_id is not None:
hutil_log_info('Tenant ID from Metadata API is {0}'.format(tenant_id))
access_token = get_access_token(tenant_id, OAuthTokenResource)
else:
return None
# Query OMS service for the workspace info for onboarding
if tenant_id is not None and access_token is not None:
return get_workspace_info_from_oms(vm_resource_id, tenant_id,
access_token)
else:
return None
def disable():
"""
Disable all OMS workspace processes on the VM.
Note: disable operation times out from WAAgent at 15 minutes
"""
# Check if the service control script is available
if not os.path.exists(OMSAgentServiceScript):
        log_and_exit('Disable', 1, 'OMSAgent service control script {0} does ' \
'not exist. Disable cannot be called ' \
'before install.'.format(OMSAgentServiceScript))
return 1
exit_code, output = run_command_and_log(DisableOMSAgentServiceCommand)
return exit_code
# Dictionary of operations strings to methods
operations = {'Disable' : disable,
'Uninstall' : uninstall,
'Install' : install,
'Enable' : enable,
# Upgrade is noop since omsagent.py->install() will be called
              # every time an upgrade is done due to upgradeMode =
# "UpgradeWithInstall" set in HandlerManifest
'Update' : dummy_command
}
def parse_context(operation):
"""
Initialize a HandlerUtil object for this operation.
If the required modules have not been imported, this will return None.
"""
hutil = None
if ('Utils.WAAgentUtil' in sys.modules
and 'Utils.HandlerUtil' in sys.modules):
try:
hutil = HUtil.HandlerUtility(waagent.Log, waagent.Error)
hutil.do_parse_context(operation)
# parse_context may throw KeyError if necessary JSON key is not
# present in settings
except KeyError as e:
waagent_log_error('Unable to parse context with error: ' \
'{0}'.format(e))
raise ParameterMissingException
return hutil
def is_vm_supported_for_extension():
"""
Checks if the VM this extension is running on is supported by OMSAgent
    Values returned by platform.linux_distribution() vary widely in format;
    for example, '7.3.1611' is returned for a VM with CentOS 7, so only the
    leading digits provided in the supported list must match
The supported distros of the OMSAgent-for-Linux, as well as Ubuntu 16.10,
are allowed to utilize this VM extension. All other distros will get
error code 51
"""
supported_dists = {'redhat' : ('5', '6', '7'), # CentOS
'centos' : ('5', '6', '7'), # CentOS
'red hat' : ('5', '6', '7'), # Oracle, RHEL
'oracle' : ('5', '6', '7'), # Oracle
'debian' : ('6', '7', '8', '9'), # Debian
'ubuntu' : ('12.04', '14.04', '15.04', '15.10',
'16.04', '16.10'), # Ubuntu
'suse' : ('11', '12') #SLES
}
try:
vm_dist, vm_ver, vm_id = platform.linux_distribution()
except AttributeError:
vm_dist, vm_ver, vm_id = platform.dist()
vm_supported = False
# Find this VM distribution in the supported list
for supported_dist in supported_dists.keys():
if not vm_dist.lower().startswith(supported_dist):
continue
# Check if this VM distribution version is supported
vm_ver_split = vm_ver.split('.')
for supported_ver in supported_dists[supported_dist]:
supported_ver_split = supported_ver.split('.')
# If vm_ver is at least as precise (at least as many digits) as
# supported_ver and matches all the supported_ver digits, then
# this VM is guaranteed to be supported
vm_ver_match = True
for idx, supported_ver_num in enumerate(supported_ver_split):
try:
supported_ver_num = int(supported_ver_num)
vm_ver_num = int(vm_ver_split[idx])
except IndexError:
vm_ver_match = False
break
if vm_ver_num is not supported_ver_num:
vm_ver_match = False
break
if vm_ver_match:
vm_supported = True
break
if vm_supported:
break
return vm_supported, vm_dist, vm_ver
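# Illustrative examples of the version match above (hypothetical values):
#   vm_dist='CentOS Linux', vm_ver='7.3.1611' -> matches supported '7'
#   vm_dist='Ubuntu',       vm_ver='16.04'    -> matches supported '16.04'
#   vm_dist='Ubuntu',       vm_ver='16'       -> no match; vm_ver must be at
#                                                least as precise as the
#                                                supported version string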
def exit_if_vm_not_supported(operation):
"""
Check if this VM distro and version are supported by the OMSAgent.
If this VM is not supported, log the proper error code and exit.
"""
vm_supported, vm_dist, vm_ver = is_vm_supported_for_extension()
if not vm_supported:
        log_and_exit(operation, 51, 'Unsupported operating system: ' \
'{0} {1}'.format(vm_dist, vm_ver))
return 0
def exit_if_openssl_unavailable(operation):
"""
Check if the openssl commandline interface is available to use
If not, throw error to return UnsupportedOpenSSL error code
"""
exit_code, output = run_get_output('which openssl', True, False)
if exit_code is not 0:
log_and_exit(operation, UnsupportedOpenSSL, 'OpenSSL is not available')
return 0
def check_workspace_id_and_key(workspace_id, workspace_key):
"""
Validate formats of workspace_id and workspace_key
"""
check_workspace_id(workspace_id)
# Validate that workspace_key is of the correct format (base64-encoded)
if workspace_key is None:
raise ParameterMissingException('Workspace key must be provided')
try:
encoded_key = base64.b64encode(base64.b64decode(workspace_key))
if encoded_key != workspace_key:
raise InvalidParameterError('Workspace key is invalid')
except TypeError:
raise InvalidParameterError('Workspace key is invalid')
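# For example (hypothetical key material): a value produced by
# base64.b64encode(), such as base64.b64encode('a' * 32), survives the
# decode/encode round trip above unchanged; a string that does not
# round-trip this way is reported as an invalid workspace key.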
def check_workspace_id(workspace_id):
"""
Validate that workspace_id matches the GUID regex
"""
if workspace_id is None:
raise ParameterMissingException('Workspace ID must be provided')
search = re.compile(GUIDOnlyRegex, re.M)
if not search.match(workspace_id):
raise InvalidParameterError('Workspace ID is invalid')
def detect_multiple_connections(workspace_id):
"""
If the VM already has a workspace/SCOM configured, then we should
disallow a new connection when stopOnMultipleConnections is used
Throw an exception in these cases:
- The workspace with the given workspace_id has not been onboarded
to the VM, but at least one other workspace has been
- The workspace with the given workspace_id has not been onboarded
to the VM, and the VM is connected to SCOM
If the extension operation is connecting to an already-configured
workspace, it is not a stopping case
"""
other_connection_exists = False
if os.path.exists(OMSAdminPath):
exit_code, output = run_get_output(WorkspaceCheckCommand,
chk_err = False)
if output.strip().lower() != 'no workspace':
for line in output.split('\n'):
if workspace_id in line:
hutil_log_info('The workspace to be enabled has already ' \
'been configured on the VM before; ' \
'continuing despite ' \
'stopOnMultipleConnections flag')
return
else:
# Note: if scom workspace dir is created, a line containing
# "Workspace(SCOM Workspace): scom" will be here
# If any other line is here, it may start sending data later
other_connection_exists = True
else:
for dir_name, sub_dirs, files in os.walk(EtcOMSAgentPath):
for sub_dir in sub_dirs:
sub_dir_name = os.path.basename(sub_dir)
workspace_search = re.compile(GUIDOnlyRegex, re.M)
if sub_dir_name == workspace_id:
hutil_log_info('The workspace to be enabled has already ' \
'been configured on the VM before; ' \
'continuing despite ' \
'stopOnMultipleConnections flag')
return
elif (workspace_search.match(sub_dir_name)
or sub_dir_name == 'scom'):
other_connection_exists = True
if other_connection_exists:
        err_msg = ('This machine is already connected to another Log ' \
                   'Analytics workspace. Please set ' \
                   'stopOnMultipleConnections to false in public ' \
                   'settings or remove this property so that this machine ' \
                   'can connect to new workspaces; note that it will then ' \
                   'be billed for each workspace it reports to. ' \
                   '(LINUXOMSAGENTEXTENSION_ERROR_MULTIPLECONNECTIONS)')
# This exception will get caught by the main method
raise UnwantedMultipleConnectionsException(err_msg)
else:
detect_scom_connection()
def detect_scom_connection():
"""
If these two conditions are met, then we can assume the
VM is monitored
by SCOM:
1. SCOMPort is open and omiserver is listening on it
2. scx certificate is signed by SCOM server
To determine it check for existence of below two
conditions:
1. SCOMPort is open and omiserver is listening on it:
/etc/omi/conf/omiserver.conf can be parsed to
determine it.
2. scx certificate is signed by SCOM server: scom cert
is present @ /etc/opt/omi/ssl/omi-host-<hostname>.pem
(/etc/opt/microsoft/scx/ssl/scx.pem is a softlink to
this). If the VM is monitored by SCOM then issuer
field of the certificate will have a value like
CN=SCX-Certificate/title=<GUID>, DC=<SCOM server hostname>
(e.g CN=SCX-Certificate/title=SCX94a1f46d-2ced-4739-9b6a-1f06156ca4ac,
DC=NEB-OM-1502733)
Otherwise, if a scom configuration directory has been
created, we assume SCOM is in use
"""
scom_port_open = None # return when determine this is false
cert_signed_by_scom = False
if os.path.exists(OMSAdminPath):
scom_port_open = detect_scom_using_omsadmin()
if scom_port_open is False:
return
# If omsadmin.sh option is not available, use omiconfigeditor
if (scom_port_open is None and os.path.exists(OMIConfigEditorPath)
and os.path.exists(OMIServerConfPath)):
scom_port_open = detect_scom_using_omiconfigeditor()
if scom_port_open is False:
return
# If omiconfigeditor option is not available, directly parse omiserver.conf
if scom_port_open is None and os.path.exists(OMIServerConfPath):
scom_port_open = detect_scom_using_omiserver_conf()
if scom_port_open is False:
return
if scom_port_open is None:
hutil_log_info('SCOM port could not be determined to be open')
return
# Parse the certificate to determine if SCOM issued it
if os.path.exists(SCOMCertPath):
exit_if_openssl_unavailable('Install')
cert_cmd = 'openssl x509 -in {0} -noout -text'.format(SCOMCertPath)
cert_exit_code, cert_output = run_get_output(cert_cmd, chk_err = False,
log_cmd = False)
if cert_exit_code is 0:
issuer_re = re.compile(SCOMCertIssuerRegex, re.M)
if issuer_re.search(cert_output):
hutil_log_info('SCOM cert exists and is signed by SCOM server')
cert_signed_by_scom = True
else:
hutil_log_info('SCOM cert exists but is not signed by SCOM ' \
'server')
else:
hutil_log_error('Error reading SCOM cert; cert could not be ' \
'determined to be signed by SCOM server')
else:
hutil_log_info('SCOM cert does not exist')
if scom_port_open and cert_signed_by_scom:
err_msg = ('This machine may already be connected to a System ' \
'Center Operations Manager server. Please set ' \
'stopOnMultipleConnections to false in public settings ' \
'or remove this property to allow connection to the Log ' \
'Analytics workspace. ' \
'(LINUXOMSAGENTEXTENSION_ERROR_MULTIPLECONNECTIONS)')
raise UnwantedMultipleConnectionsException(err_msg)
def detect_scom_using_omsadmin():
"""
This method assumes that OMSAdminPath exists; if packages have not
been installed yet, this may not exist
Returns True if omsadmin.sh indicates that SCOM port is open
"""
omsadmin_cmd = '{0} -o'.format(OMSAdminPath)
exit_code, output = run_get_output(omsadmin_cmd, False, False)
# Guard against older omsadmin.sh versions
if ('illegal option' not in output.lower()
and 'unknown option' not in output.lower()):
if exit_code is 0:
hutil_log_info('According to {0}, SCOM port is ' \
'open'.format(omsadmin_cmd))
return True
elif exit_code is 1:
hutil_log_info('According to {0}, SCOM port is not ' \
'open'.format(omsadmin_cmd))
return False
def detect_scom_using_omiconfigeditor():
"""
This method assumes that the relevant files exist
Returns True if omiconfigeditor indicates that SCOM port is open
"""
omi_cmd = '{0} httpsport -q {1} < {2}'.format(OMIConfigEditorPath,
SCOMPort, OMIServerConfPath)
exit_code, output = run_get_output(omi_cmd, False, False)
# Guard against older omiconfigeditor versions
if ('illegal option' not in output.lower()
and 'unknown option' not in output.lower()):
if exit_code is 0:
hutil_log_info('According to {0}, SCOM port is ' \
'open'.format(omi_cmd))
return True
elif exit_code is 1:
hutil_log_info('According to {0}, SCOM port is not ' \
'open'.format(omi_cmd))
return False
def detect_scom_using_omiserver_conf():
"""
This method assumes that the relevant files exist
Returns True if omiserver.conf indicates that SCOM port is open
"""
with open(OMIServerConfPath, 'r') as omiserver_file:
omiserver_txt = omiserver_file.read()
httpsport_search = r'^[\s]*httpsport[\s]*=(.*)$'
httpsport_re = re.compile(httpsport_search, re.M)
httpsport_matches = httpsport_re.search(omiserver_txt)
if (httpsport_matches is not None and
httpsport_matches.group(1) is not None):
ports = httpsport_matches.group(1)
ports = ports.replace(',', ' ')
ports_list = ports.split(' ')
if str(SCOMPort) in ports_list:
hutil_log_info('SCOM port is listed in ' \
'{0}'.format(OMIServerConfPath))
return True
else:
hutil_log_info('SCOM port is not listed in ' \
'{0}'.format(OMIServerConfPath))
else:
hutil_log_info('SCOM port is not listed in ' \
'{0}'.format(OMIServerConfPath))
return False
def run_command_and_log(cmd, check_error = True, log_cmd = True):
"""
Run the provided shell command and log its output, including stdout and
stderr.
The output should not contain any PII, but the command might. In this case,
log_cmd should be set to False.
"""
exit_code, output = run_get_output(cmd, check_error, log_cmd)
if log_cmd:
hutil_log_info('Output of command "{0}": \n{1}'.format(cmd, output))
else:
hutil_log_info('Output: \n{0}'.format(output))
return exit_code, output
def run_command_with_retries(cmd, retries, retry_check, final_check = None,
check_error = True, log_cmd = True,
initial_sleep_time = InitialRetrySleepSeconds,
sleep_increase_factor = 1):
"""
Caller provides a method, retry_check, to use to determine if a retry
    should be performed. This must be a function taking two parameters,
    exit_code and output, and returning a tuple of
    (should_retry, retry_message, retry_verbosely)
The final_check can be provided as a method to perform a final check after
retries have been exhausted
Logic used: will retry up to retries times with initial_sleep_time in
between tries
If the retry_check returns True for retry_verbosely, we will try cmd with
the standard -v verbose flag added
"""
try_count = 0
sleep_time = initial_sleep_time
run_cmd = cmd
run_verbosely = False
while try_count <= retries:
if run_verbosely:
run_cmd = cmd + ' -v'
exit_code, output = run_command_and_log(run_cmd, check_error, log_cmd)
should_retry, retry_message, run_verbosely = retry_check(exit_code,
output)
if not should_retry:
break
try_count += 1
hutil_log_info(retry_message)
time.sleep(sleep_time)
sleep_time *= sleep_increase_factor
if final_check is not None:
exit_code = final_check(exit_code, output)
return exit_code
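# A minimal usage sketch (hypothetical command and check); the retry_check
# callable returns (should_retry, retry_message, retry_verbosely):
#
#   def retry_on_any_failure(exit_code, output):
#       return exit_code != 0, 'Retrying hypothetical command', False
#
#   run_command_with_retries('echo hello', retries = 3,
#                            retry_check = retry_on_any_failure)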
def is_dpkg_locked(exit_code, output):
"""
If dpkg is locked, the output will contain a message similar to 'dpkg
status database is locked by another process'
"""
if exit_code is not 0:
dpkg_locked_search = r'^.*dpkg.+lock.*$'
dpkg_locked_re = re.compile(dpkg_locked_search, re.M)
if dpkg_locked_re.search(output):
return True
return False
def was_curl_found(exit_code, output):
"""
Returns false if exit_code indicates that curl was not installed; this can
occur when package lists need to be updated, or when some archives are
out-of-date
"""
if exit_code is InstallErrorCurlNotInstalled:
return False
return True
def retry_if_dpkg_locked_or_curl_is_not_found(exit_code, output):
"""
Some commands fail because the package manager is locked (apt-get/dpkg
only); this will allow retries on failing commands.
Sometimes curl's dependencies (i.e. libcurl) are not installed; if this
is the case on a VM with apt-get, 'apt-get -f install' should be run
Sometimes curl is not installed and is also not found in the package list;
if this is the case on a VM with apt-get, update the package list
"""
retry_verbosely = False
dpkg_locked = is_dpkg_locked(exit_code, output)
curl_found = was_curl_found(exit_code, output)
apt_get_exit_code, apt_get_output = run_get_output('which apt-get',
chk_err = False,
log_cmd = False)
if dpkg_locked:
return True, 'Retrying command because package manager is locked.', \
retry_verbosely
elif (not curl_found and apt_get_exit_code is 0 and
('apt-get -f install' in output
or 'Unmet dependencies' in output.lower())):
hutil_log_info('Installing all dependencies of curl:')
run_command_and_log('apt-get -f install')
return True, 'Retrying command because curl and its dependencies ' \
'needed to be installed', retry_verbosely
elif not curl_found and apt_get_exit_code is 0:
hutil_log_info('Updating package lists to make curl available')
run_command_and_log('apt-get update')
return True, 'Retrying command because package lists needed to be ' \
'updated', retry_verbosely
else:
return False, '', False
def final_check_if_dpkg_locked(exit_code, output):
"""
If dpkg is still locked after the retries, we want to return a specific
error code
"""
dpkg_locked = is_dpkg_locked(exit_code, output)
if dpkg_locked:
exit_code = DPKGLockedErrorCode
return exit_code
def retry_onboarding(exit_code, output):
"""
Retry under any of these conditions:
- If the onboarding request returns 403: this may indicate that the agent
GUID and certificate should be re-generated
- If the onboarding request returns a different non-200 code: the OMS
service may be temporarily unavailable
- If the onboarding curl command returns an unaccounted-for error code,
we should retry with verbose logging
"""
retry_verbosely = False
if exit_code is EnableErrorOMSReturned403:
return True, 'Retrying the onboarding command to attempt generating ' \
'a new agent ID and certificate.', retry_verbosely
elif exit_code is EnableErrorOMSReturnedNon200:
return True, 'Retrying; the OMS service may be temporarily ' \
'unavailable.', retry_verbosely
elif exit_code is EnableErrorOnboarding:
return True, 'Retrying with verbose logging.', True
return False, '', False
def raise_if_no_internet(exit_code, output):
"""
Raise the CannotConnectToOMSException exception if the onboarding
script returns the error code to indicate that the OMS service can't be
resolved
"""
if exit_code is EnableErrorResolvingHost:
raise CannotConnectToOMSException
return exit_code
def get_settings():
"""
Retrieve the configuration for this extension operation
"""
global SettingsDict
public_settings = None
protected_settings = None
if HUtilObject is not None:
public_settings = HUtilObject.get_public_settings()
protected_settings = HUtilObject.get_protected_settings()
elif SettingsDict is not None:
public_settings = SettingsDict['public_settings']
protected_settings = SettingsDict['protected_settings']
else:
SettingsDict = {}
handler_env = get_handler_env()
try:
config_dir = str(handler_env['handlerEnvironment']['configFolder'])
except:
config_dir = os.path.join(os.getcwd(), 'config')
seq_no = get_latest_seq_no()
settings_path = os.path.join(config_dir, '{0}.settings'.format(seq_no))
try:
with open(settings_path, 'r') as settings_file:
settings_txt = settings_file.read()
settings = json.loads(settings_txt)
h_settings = settings['runtimeSettings'][0]['handlerSettings']
public_settings = h_settings['publicSettings']
SettingsDict['public_settings'] = public_settings
except:
hutil_log_error('Unable to load handler settings from ' \
'{0}'.format(settings_path))
if (h_settings.has_key('protectedSettings')
and h_settings.has_key('protectedSettingsCertThumbprint')
and h_settings['protectedSettings'] is not None
and h_settings['protectedSettingsCertThumbprint'] is not None):
encoded_settings = h_settings['protectedSettings']
settings_thumbprint = h_settings['protectedSettingsCertThumbprint']
encoded_cert_path = os.path.join('/var/lib/waagent',
'{0}.crt'.format(
settings_thumbprint))
encoded_key_path = os.path.join('/var/lib/waagent',
'{0}.prv'.format(
settings_thumbprint))
decoded_settings = base64.standard_b64decode(encoded_settings)
decrypt_cmd = 'openssl smime -inform DER -decrypt -recip {0} ' \
'-inkey {1}'.format(encoded_cert_path,
encoded_key_path)
try:
session = subprocess.Popen([decrypt_cmd], shell = True,
stdin = subprocess.PIPE,
stderr = subprocess.STDOUT,
stdout = subprocess.PIPE)
output = session.communicate(decoded_settings)
except OSError:
pass
protected_settings_str = output[0]
if protected_settings_str is None:
log_and_exit('Enable', 1, 'Failed decrypting ' \
'protectedSettings')
protected_settings = ''
try:
protected_settings = json.loads(protected_settings_str)
except:
hutil_log_error('JSON exception decoding protected settings')
SettingsDict['protected_settings'] = protected_settings
return public_settings, protected_settings
def update_status_file(operation, exit_code, exit_status, message):
"""
Mimic HandlerUtil method do_status_report in case hutil method is not
available
Write status to status file
"""
handler_env = get_handler_env()
try:
extension_version = str(handler_env['version'])
status_dir = str(handler_env['handlerEnvironment']['statusFolder'])
except:
extension_version = "1.0"
status_dir = os.path.join(os.getcwd(), 'status')
status_txt = [{
"version" : extension_version,
"timestampUTC" : time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
"status" : {
"name" : "Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux",
"operation" : operation,
"status" : exit_status,
"code" : exit_code,
"formattedMessage" : {
"lang" : "en-US",
"message" : message
}
}
}]
status_json = json.dumps(status_txt)
# Find the most recently changed config file and then use the
# corresponding status file
latest_seq_no = get_latest_seq_no()
status_path = os.path.join(status_dir, '{0}.status'.format(latest_seq_no))
status_tmp = '{0}.tmp'.format(status_path)
with open(status_tmp, 'w+') as tmp_file:
tmp_file.write(status_json)
os.rename(status_tmp, status_path)
def get_handler_env():
"""
Set and retrieve the contents of HandlerEnvironment.json as JSON
"""
global HandlerEnvironment
if HandlerEnvironment is None:
handler_env_path = os.path.join(os.getcwd(), 'HandlerEnvironment.json')
try:
with open(handler_env_path, 'r') as handler_env_file:
handler_env_txt = handler_env_file.read()
handler_env = json.loads(handler_env_txt)
if type(handler_env) == list:
handler_env = handler_env[0]
HandlerEnvironment = handler_env
except Exception as e:
waagent_log_error(str(e))
return HandlerEnvironment
def get_latest_seq_no():
"""
Determine the latest operation settings number to use
"""
global SettingsSequenceNumber
if SettingsSequenceNumber is None:
handler_env = get_handler_env()
try:
config_dir = str(handler_env['handlerEnvironment']['configFolder'])
except:
config_dir = os.path.join(os.getcwd(), 'config')
latest_seq_no = -1
cur_seq_no = -1
latest_time = None
try:
for dir_name, sub_dirs, file_names in os.walk(config_dir):
for file_name in file_names:
file_basename = os.path.basename(file_name)
match = re.match(r'[0-9]{1,10}\.settings', file_basename)
if match is None:
continue
cur_seq_no = int(file_basename.split('.')[0])
file_path = os.path.join(config_dir, file_name)
cur_time = os.path.getmtime(file_path)
if latest_time is None or cur_time > latest_time:
latest_time = cur_time
latest_seq_no = cur_seq_no
except:
pass
if latest_seq_no < 0:
latest_seq_no = 0
SettingsSequenceNumber = latest_seq_no
return SettingsSequenceNumber
def run_get_output(cmd, chk_err = False, log_cmd = True):
"""
    Mimic waagent method RunGetOutput in case waagent is not available
Run shell command and return exit code and output
"""
if 'Utils.WAAgentUtil' in sys.modules:
# WALinuxAgent-2.0.14 allows only 2 parameters for RunGetOutput
# If checking the number of parameters fails, pass 2
try:
sig = inspect.signature(waagent.RunGetOutput)
params = sig.parameters
waagent_params = len(params)
except:
try:
spec = inspect.getargspec(waagent.RunGetOutput)
params = spec.args
waagent_params = len(params)
except:
waagent_params = 2
if waagent_params >= 3:
exit_code, output = waagent.RunGetOutput(cmd, chk_err, log_cmd)
else:
exit_code, output = waagent.RunGetOutput(cmd, chk_err)
else:
try:
output = subprocess.check_output(cmd, stderr = subprocess.STDOUT,
shell = True)
exit_code = 0
except subprocess.CalledProcessError as e:
exit_code = e.returncode
output = e.output
return exit_code, output.encode('utf-8').strip()
def get_tenant_id_from_metadata_api(vm_resource_id):
"""
Retrieve the Tenant ID using the Metadata API of the VM resource ID
Since we have not authenticated, the Metadata API will throw a 401, but
the headers of the 401 response will contain the tenant ID
"""
tenant_id = None
metadata_endpoint = get_metadata_api_endpoint(vm_resource_id)
metadata_request = urllib2.Request(metadata_endpoint)
try:
# This request should fail with code 401
metadata_response = urllib2.urlopen(metadata_request)
hutil_log_info('Request to Metadata API did not fail as expected; ' \
'attempting to use headers from response to ' \
'determine Tenant ID')
metadata_headers = metadata_response.headers
except urllib2.HTTPError as e:
metadata_headers = e.headers
if metadata_headers is not None and 'WWW-Authenticate' in metadata_headers:
auth_header = metadata_headers['WWW-Authenticate']
auth_header_regex = r'authorization_uri=\"https:\/\/login\.windows\.net/(' + GUIDRegex + ')\"'
auth_header_search = re.compile(auth_header_regex)
auth_header_matches = auth_header_search.search(auth_header)
if not auth_header_matches:
raise MetadataAPIException('The WWW-Authenticate header in the ' \
'response does not contain expected ' \
'authorization_uri format')
else:
tenant_id = auth_header_matches.group(1)
else:
raise MetadataAPIException('Expected information from Metadata API ' \
'is not present')
return tenant_id
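# Example of the header value this parses (hypothetical tenant GUID):
#   Bearer authorization_uri="https://login.windows.net/00000000-0000-0000-0000-000000000000"
# from which group(1) yields the tenant ID used for the OAuth token request.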
def get_metadata_api_endpoint(vm_resource_id):
"""
Extrapolate Metadata API endpoint from VM Resource ID
Example VM resource ID: /subscriptions/306ee7f1-3d0a-4605-9f39-ff253cc02708/resourceGroups/LinuxExtVMResourceGroup/providers/Microsoft.Compute/virtualMachines/lagalbraOCUb16C
Corresponding example endpoint: https://management.azure.com/subscriptions/306ee7f1-3d0a-4605-9f39-ff253cc02708/resourceGroups/LinuxExtVMResourceGroup?api-version=2016-09-01
"""
# Will match for ARM and Classic VMs, Availability Sets, VM Scale Sets
vm_resource_id_regex = r'^\/subscriptions\/(' + GUIDRegex + ')\/' \
'resourceGroups\/([^\/]+)\/providers\/Microsoft' \
'\.(?:Classic){0,1}Compute\/(?:virtualMachines|' \
'availabilitySets|virtualMachineScaleSets)' \
'\/[^\/]+$'
vm_resource_id_search = re.compile(vm_resource_id_regex, re.M)
vm_resource_id_matches = vm_resource_id_search.search(vm_resource_id)
if not vm_resource_id_matches:
raise InvalidParameterError('VM Resource ID is invalid')
else:
subscription_id = vm_resource_id_matches.group(1)
resource_group = vm_resource_id_matches.group(2)
metadata_url = 'https://management.azure.com/subscriptions/{0}' \
'/resourceGroups/{1}'.format(subscription_id,
resource_group)
metadata_data = urllib.urlencode({'api-version' : '2016-09-01'})
metadata_endpoint = '{0}?{1}'.format(metadata_url, metadata_data)
return metadata_endpoint
def get_access_token(tenant_id, resource):
"""
Retrieve an OAuth token by sending an OAuth2 token exchange
request to the local URL that the ManagedIdentity extension is
listening to
"""
# Extract the endpoint that the ManagedIdentity extension is listening on
with open(ManagedIdentityExtListeningURLPath, 'r') as listening_file:
listening_settings_txt = listening_file.read()
try:
listening_settings = json.loads(listening_settings_txt)
listening_url = listening_settings['url']
except:
raise ManagedIdentityExtException('Could not extract listening URL ' \
'from settings file')
# Send an OAuth token exchange request
oauth_data = {'authority' : 'https://login.microsoftonline.com/' \
'{0}'.format(tenant_id),
'resource' : resource
}
oauth_request = urllib2.Request(listening_url + '/oauth2/token',
urllib.urlencode(oauth_data))
oauth_request.add_header('Metadata', 'true')
try:
oauth_response = urllib2.urlopen(oauth_request)
oauth_response_txt = oauth_response.read()
except urllib2.HTTPError as e:
hutil_log_error('Request to ManagedIdentity extension listening URL ' \
'failed with an HTTPError: {0}'.format(e))
hutil_log_info('Response from ManagedIdentity extension: ' \
'{0}'.format(e.read()))
raise ManagedIdentityExtException('Request to listening URL failed ' \
'with HTTPError {0}'.format(e))
except:
raise ManagedIdentityExtException('Unexpected error from request to ' \
'listening URL')
try:
oauth_response_json = json.loads(oauth_response_txt)
except:
raise ManagedIdentityExtException('Error parsing JSON from ' \
'listening URL response')
if (oauth_response_json is not None
and 'access_token' in oauth_response_json):
return oauth_response_json['access_token']
else:
raise ManagedIdentityExtException('Could not retrieve access token ' \
'in the listening URL response')
def get_workspace_info_from_oms(vm_resource_id, tenant_id, access_token):
"""
Send a request to the OMS service with the VM information to
determine the workspace the OMSAgent should onboard to
"""
oms_data = {'ResourceId' : vm_resource_id,
'TenantId' : tenant_id,
'JwtToken' : access_token
}
oms_request_json = json.dumps(oms_data)
oms_request = urllib2.Request(OMSServiceValidationEndpoint)
oms_request.add_header('Content-Type', 'application/json')
retries = 5
initial_sleep_time = AutoManagedWorkspaceCreationSleepSeconds
sleep_increase_factor = 1
try_count = 0
sleep_time = initial_sleep_time
# Workspace may not be provisioned yet; sleep and retry if
# provisioning has been accepted
while try_count <= retries:
try:
oms_response = urllib2.urlopen(oms_request, oms_request_json)
oms_response_txt = oms_response.read()
except urllib2.HTTPError as e:
hutil_log_error('Request to OMS threw HTTPError: {0}'.format(e))
hutil_log_info('Response from OMS: {0}'.format(e.read()))
raise OMSServiceOneClickException('ValidateMachineIdentity ' \
'request returned an error ' \
'HTTP code: {0}'.format(e))
except:
raise OMSServiceOneClickException('Unexpected error from ' \
'ValidateMachineIdentity ' \
'request')
should_retry = retry_get_workspace_info_from_oms(oms_response)
if not should_retry:
# TESTED
break
elif try_count == retries:
# TESTED
hutil_log_error('Retries for ValidateMachineIdentity request ran ' \
'out: required workspace information cannot be ' \
'extracted')
raise OneClickException('Workspace provisioning did not complete ' \
'within the allotted time')
# TESTED
try_count += 1
time.sleep(sleep_time)
sleep_time *= sleep_increase_factor
if not oms_response_txt:
raise OMSServiceOneClickException('Body from ValidateMachineIdentity ' \
'response is empty; required ' \
'workspace information cannot be ' \
'extracted')
try:
oms_response_json = json.loads(oms_response_txt)
except:
raise OMSServiceOneClickException('Error parsing JSON from ' \
'ValidateMachineIdentity response')
if (oms_response_json is not None and 'WorkspaceId' in oms_response_json
and 'WorkspaceKey' in oms_response_json):
return oms_response_json
else:
hutil_log_error('Could not retrieve both workspace ID and key from ' \
'the OMS service response {0}; cannot determine ' \
'workspace ID and key'.format(oms_response_json))
raise OMSServiceOneClickException('Required workspace information ' \
'was not found in the ' \
'ValidateMachineIdentity response')
def retry_get_workspace_info_from_oms(oms_response):
"""
Return True to retry if the response from OMS for the
    ValidateMachineIdentity request indicates that the request has
been accepted, but the managed workspace is still being
provisioned
"""
try:
oms_response_http_code = oms_response.getcode()
except:
        hutil_log_error('Unable to get HTTP code from OMS response')
return False
if (oms_response_http_code is 202 or oms_response_http_code is 204
or oms_response_http_code is 404):
hutil_log_info('Retrying ValidateMachineIdentity OMS request ' \
'because workspace is still being provisioned; HTTP ' \
'code from OMS is {0}'.format(oms_response_http_code))
return True
else:
hutil_log_info('Workspace is provisioned; HTTP code from OMS is ' \
'{0}'.format(oms_response_http_code))
return False
def init_waagent_logger():
"""
Initialize waagent logger
If waagent has not been imported, catch the exception
"""
try:
waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout', True)
except Exception as e:
print('Unable to initialize waagent log because of exception ' \
'{0}'.format(e))
def waagent_log_info(message):
"""
Log informational message, being cautious of possibility that waagent may
not be imported
"""
if 'Utils.WAAgentUtil' in sys.modules:
waagent.Log(message)
else:
print('Info: {0}'.format(message))
def waagent_log_error(message):
"""
Log error message, being cautious of possibility that waagent may not be
imported
"""
if 'Utils.WAAgentUtil' in sys.modules:
waagent.Error(message)
else:
print('Error: {0}'.format(message))
def hutil_log_info(message):
"""
Log informational message, being cautious of possibility that hutil may
not be imported and configured
"""
if HUtilObject is not None:
HUtilObject.log(message)
else:
print('Info: {0}'.format(message))
def hutil_log_error(message):
"""
Log error message, being cautious of possibility that hutil may not be
imported and configured
"""
if HUtilObject is not None:
HUtilObject.error(message)
else:
print('Error: {0}'.format(message))
def log_and_exit(operation, exit_code = 1, message = ''):
"""
Log the exit message and perform the exit
"""
if exit_code is 0:
waagent_log_info(message)
hutil_log_info(message)
exit_status = 'success'
else:
waagent_log_error(message)
hutil_log_error(message)
exit_status = 'failed'
if HUtilObject is not None:
HUtilObject.do_exit(exit_code, operation, exit_status, str(exit_code),
message)
else:
update_status_file(operation, str(exit_code), exit_status, message)
sys.exit(exit_code)
# Exceptions
# If these exceptions are expected to be caught by the main method, they
# include an error_code field with an integer with which to exit from main
class OmsAgentForLinuxException(Exception):
"""
Base exception class for all exceptions; as such, its error code is the
basic error code traditionally returned in Linux: 1
"""
error_code = 1
def get_error_message(self, operation):
"""
Return a descriptive error message based on this type of exception
"""
return '{0} failed with exit code {1}'.format(operation,
self.error_code)
class ParameterMissingException(OmsAgentForLinuxException):
"""
There is a missing parameter for the OmsAgentForLinux Extension
"""
error_code = MissingorInvalidParameterErrorCode
def get_error_message(self, operation):
return '{0} failed due to a missing parameter: {1}'.format(operation,
self)
class InvalidParameterError(OmsAgentForLinuxException):
"""
There is an invalid parameter for the OmsAgentForLinux Extension
ex. Workspace ID does not match GUID regex
"""
error_code = MissingorInvalidParameterErrorCode
def get_error_message(self, operation):
return '{0} failed due to an invalid parameter: {1}'.format(operation,
self)
class UnwantedMultipleConnectionsException(OmsAgentForLinuxException):
"""
This VM is already connected to a different Log Analytics workspace
and stopOnMultipleConnections is set to true
"""
error_code = UnwantedMultipleConnectionsErrorCode
def get_error_message(self, operation):
return '{0} failed due to multiple connections: {1}'.format(operation,
self)
class CannotConnectToOMSException(OmsAgentForLinuxException):
"""
The OMSAgent cannot connect to the OMS service
"""
error_code = CannotConnectToOMSErrorCode # error code to indicate no internet access
def get_error_message(self, operation):
return 'The agent could not connect to the Microsoft Operations ' \
'Management Suite service. Please check that the system ' \
'either has Internet access, or that a valid HTTP proxy has ' \
'been configured for the agent. Please also check the ' \
'correctness of the workspace ID.'
class OneClickException(OmsAgentForLinuxException):
"""
A generic exception for OneClick-related issues
"""
error_code = OneClickErrorCode
def get_error_message(self, operation):
return 'Encountered an issue related to the OneClick scenario: ' \
'{0}'.format(self)
class ManagedIdentityExtMissingException(OneClickException):
"""
This extension being present is required for the OneClick scenario
"""
error_code = ManagedIdentityExtMissingErrorCode
def get_error_message(self, operation):
return 'The ManagedIdentity extension is required to be installed ' \
'for Automatic Management to be enabled. Please set ' \
'EnableAutomaticManagement to false in public settings or ' \
'install the ManagedIdentityExtensionForLinux Azure VM ' \
'extension.'
class ManagedIdentityExtException(OneClickException):
"""
Thrown when we encounter an issue with ManagedIdentityExtensionForLinux
"""
error_code = ManagedIdentityExtErrorCode
def get_error_message(self, operation):
return 'Encountered an issue with the ManagedIdentity extension: ' \
'{0}'.format(self)
class MetadataAPIException(OneClickException):
"""
Thrown when we encounter an issue with Metadata API
"""
error_code = MetadataAPIErrorCode
def get_error_message(self, operation):
return 'Encountered an issue with the Metadata API: {0}'.format(self)
class OMSServiceOneClickException(OneClickException):
"""
Thrown when prerequisites were satisfied but could not retrieve the managed
workspace information from OMS service
"""
error_code = OMSServiceOneClickErrorCode
def get_error_message(self, operation):
return 'Encountered an issue with the OMS service: ' \
'{0}'.format(self)
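# Usage sketch (hedged; main() and the operation handlers are defined elsewhere
# in this file): per the note above, callers are expected to catch these
# exceptions and exit with the embedded error_code, along the lines of
#
#     try:
#         operation_handler()   # e.g. an install/enable handler (placeholder name)
#     except OmsAgentForLinuxException as e:
#         log_and_exit(operation, e.error_code, e.get_error_message(operation))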
if __name__ == '__main__' :
main()
| apache-2.0 | -4,759,873,009,109,484,000 | 39.030915 | 217 | 0.613296 | false |
funkbit/django-funky-user | funky_user/urls/auth.py | 1 | 1457 | from django.conf.urls import patterns, url
from django.utils.translation import ugettext_lazy as _
from funky_user.conf import PASSWORD_TOKEN
from funky_user.forms import PasswordResetForm
# Built-in Django views
urlpatterns = patterns('django.contrib.auth.views',
url(_(r'^login/$'), 'login',
{'template_name': 'auth/login.html'}, name='user-login'),
url(_(r'^logout/$'), 'logout',
{'template_name': 'auth/logged_out.html'}, name='user-logout'),
url(_(r'^password-change/$'), 'password_change',
{'template_name': 'auth/password_change_form.html'}, name='user-password-change'),
url(_(r'^password-change/done/$'), 'password_change_done',
{'template_name': 'auth/password_change_done.html'}, name='user-password-change-done'),
url(_(r'^password-reset/$'), 'password_reset',
{'template_name': 'auth/password_reset_form.html',
'password_reset_form': PasswordResetForm}, name='user-password-reset'),
url(_(r'^password-reset/done/$'), 'password_reset_done',
{'template_name': 'auth/password_reset_done.html'}, name='user-password-reset-done'),
url(_(r'^reset/%s/$') % PASSWORD_TOKEN, 'password_reset_confirm',
{'template_name': 'auth/password_reset_confirm.html'}, name='user-password-reset-confirm'),
url(_(r'^reset/done/$'), 'password_reset_complete',
{'template_name': 'auth/password_reset_complete.html'}, name='user-password-reset-complete'),
)
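# Usage sketch (assumption, not part of this module): a project URLconf would
# normally mount these patterns under a prefix with include(), e.g.
#
#     # project urls.py
#     urlpatterns = patterns('',
#         url(r'^accounts/', include('funky_user.urls.auth')),
#     )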
| bsd-2-clause | -9,037,615,075,736,195,000 | 44.53125 | 101 | 0.659574 | false |
NERC-CEH/arcapi | arcapi_test.py | 1 | 25425 | """
#-------------------------------------------------------------------------------
# Name: arcapi_test
# Purpose: Tests for arcapi module.
#
# Author: Filip Kral, Caleb Mackay
#
# Created: 01/02/2014
# Updated: 05/15/2014
# Licence: LGPL v3
#-------------------------------------------------------------------------------
# Most of the functions operate on potentially complex data, or require manual
# checking of results, and therefore testing is rather difficult.
#
# Everybody is encouraged to contribute to tests.
#-------------------------------------------------------------------------------
"""
import unittest
import os
import sys
import datetime
import arcpy
import arcapi as ap
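# The tests below assume the sample data shipped with arcapi: a 'testing'
# folder next to this file containing testing.gdb (with the
# ne_110m_admin_0_countries, Illinois and Illinois_county_info datasets) and
# testing_files\rasters, plus a writable c:\temp folder for scratch outputs.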
class TestGlobalFunctions(unittest.TestCase):
def setUp(self):
# access testing data
try:
self.testingfolder = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'testing')
except:
self.testingfolder = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])), 'testing')
self.testing_gdb = os.path.join(self.testingfolder, 'testing.gdb')
#self.t_table = os.path.join(self.testing_gdb, '\left_i_right')
#self.t_fc = os.path.join(self.testing_gdb, 'left_i_right')
#self.t_cols = ('OBJECTID', 'Shape', 'CCARM2', 'POINT_X', u'POINT_Y', u'ROUND_X', 'ROUND_Y', 'name', 'propagatedName', 'fullName', 'GID', 'DOWNGID', 'HA_NUM','STRAHLER', 'SHREVE', 'OS_NAME', 'FNODE_FULL', 'TNODE_FULL', 'NAMENOXML', 'Shape_Length')
self.t_fc = os.path.join(self.testing_gdb, 'ne_110m_admin_0_countries')
self.t_fc2 = os.path.join(self.testing_gdb, 'Illinois')
self.t_tab = os.path.join(self.testing_gdb, 'Illinois_county_info')
self.t_cols = ('OBJECTID','Shape','ScaleRank','LabelRank','FeatureCla',
'SOVEREIGNT','SOV_A3','ADM0_DIF','LEVEL','TYPE','ADMIN',
'ADM0_A3','GEOU_DIF','GEOUNIT','GU_A3','SU_DIF','SUBUNIT',
'SU_A3','NAME','ABBREV','POSTAL','NAME_FORMA','TERR_',
'NAME_SORT','MAP_COLOR','POP_EST','GDP_MD_EST','FIPS_10_',
'ISO_A2','ISO_A3','ISO_N3','Shape_Length','Shape_Area')
pass
def tearDown(self):
pass
def testnames(self):
est = map(str, tuple(ap.names(self.t_fc)))
obs = ('OBJECTID','Shape','ScaleRank','LabelRank','FeatureCla',
'SOVEREIGNT','SOV_A3','ADM0_DIF','LEVEL','TYPE','ADMIN',
'ADM0_A3','GEOU_DIF','GEOUNIT','GU_A3','SU_DIF','SUBUNIT',
'SU_A3','NAME','ABBREV','POSTAL','NAME_FORMA','TERR_',
'NAME_SORT','MAP_COLOR','POP_EST','GDP_MD_EST','FIPS_10_',
'ISO_A2','ISO_A3','ISO_N3','Shape_Length','Shape_Area')
self.assertEqual(tuple(est), obs)
pass
def testtypes(self):
est = map(str, tuple(ap.types(self.t_fc)))
obs = ('OID','Geometry','SmallInteger','SmallInteger','String','String',
'String','Single','Single','String','String','String','Single',
'String', 'String','Single','String','String','String','String',
'String','String', 'String','String','Single','Double','Double',
'Single','String','String', 'Single','Double','Double')
pass
def testnrow(self):
est = ap.nrow(self.t_fc)
obs = 177
self.assertEqual(est, obs)
pass
def testvalues(self):
fc = self.t_fc
w = '"OBJECTID" < 11'
vals1 = ap.values(fc, 'Shape_Length', w)
vals2 = ap.values(fc, 'Shape_Length', w, 'Shape_Length ASC')
vals3 = ap.values(fc, 'SHAPE@XY', w)
vals4 = ap.values(fc, 'SHAPE@XY;Shape_Length', w, 'Shape_Length DESC')
vals5 = ap.values(fc, 'OBJECTID')[0:10]
est = all([len(vi) == 10 for vi in [vals1, vals2, vals3, vals4, vals5]])
self.assertTrue(est)
def testvalues_crosscolumns(self):
# the values function requires columns included in the o parameter
# to be included in the col parameter too, otherwise an invalid
# sql statement is generated.
fc = self.t_fc
w = '"OBJECTID" < 11'
with self.assertRaises(RuntimeError):
vals = ap.values(fc, 'SHAPE@XY', w, 'Shape_Length ASC')
pass
## def testdistinct(self):
## pass
def testhead(self):
est = 5
hd = ap.head(self.t_fc, est, geoms = " ", verbose=False)
obs = len(hd[0])
self.assertEqual(est, obs)
pass
def testchart(self):
obs = r'c:\temp\chart.jpg'
t_fc = self.t_fc
est = ap.chart(t_fc, obs, texts = {'txt': 'Element txt'}, openit=False)
self.assertEqual(str(est).lower(), str(obs).lower())
pass
def testplot(self):
pic = r'c:\temp\plot.png'
x = xrange(20)
ap.plot(x, out_file=pic, openit=False)
y = xrange(50,70)
ap.plot(x, y, pic, 'Main', 'X [m]', 'Y [m]', 'o', 'k', openit=False)
os.remove(pic)
with self.assertRaises(ap.ArcapiError):
ap.plot(x, [1,2,3], pic, 'Main', 'X [m]', 'Y [m]', 'o', 'k', openit=False)
pass
def testhist(self):
pic = r'c:\temp\plot.png'
x = xrange(20)
h = ap.hist(x, out_file=pic, openit=False)
h = ap.hist(x, pic, main='Main', xlab='Xlbl', log=True, openit=False)
os.remove(pic)
self.assertFalse(os.path.exists(pic))
def testbars(self):
pic = r'c:\temp\plot.png'
x = xrange(20)
ap.bars(x, out_file=pic, openit=False)
y = xrange(50,70)
ap.bars(x, out_file=pic, labels=y, main='Main', xlab='X', ylab='Y', openit=False)
ap.bars([], openit=False)
os.remove(pic)
self.assertFalse(os.path.exists(pic))
def testpie(self):
pic = r'c:\temp\plot.png'
x = [1,2,3,4,5,6,7]
y = [1,1,2,2,3,3,3]
ap.pie(x, openit=False)
ap.pie(x, y, main="A chart", out_file=pic, autopct='%1.1f%%', openit=False)
ap.pie(x=[], y=[], openit=False)
os.remove(pic)
self.assertFalse(os.path.exists(pic))
def testrename_col(self):
import arcpy
import tempfile
owo = arcpy.env.overwriteOutput
arcpy.env.overwriteOutput = True
tmpfc = os.path.join(tempfile.gettempdir(), "tmp")
tmpfc = arcpy.CopyFeatures_management(self.t_fc, tmpfc).getOutput(0)
est = ap.rename_col(tmpfc, "ABBREV", "ABBREVIATION")
obs = "ABBREVIATI"
arcpy.Delete_management(tmpfc)
arcpy.env.overwriteOutput = owo
self.assertEqual(est, obs)
pass
def testtlist_to_table(self):
colnames = ['NAME', 'POP_EST']
coltypes = ['TEXT', 'DOUBLE']
collengths = [250, '#']
coldefs = zip(colnames, coltypes, collengths)
coldefs2 = ['NAME:TEXT', 'POP_EST:DOUBLE']
# read data
tl = []
with arcpy.da.SearchCursor(self.t_fc, colnames) as sc:
for row in sc:
tl.append(tuple(row))
# write as table using log column definition
ot = arcpy.CreateScratchName('tmp.dbf', workspace='c:\\temp')
ot = ap.tlist_to_table(tl, ot, coldefs, -9, 'nullText')
est1 = int(arcpy.GetCount_management(ot).getOutput(0))
# write as table using short column definition
ot = arcpy.CreateScratchName('tmp.dbf', workspace='c:\\temp')
ot = ap.tlist_to_table(tl, ot, coldefs2, -9, 'nullText')
est2 = int(arcpy.GetCount_management(ot).getOutput(0))
obs = int(arcpy.GetCount_management(self.t_fc).getOutput(0))
arcpy.Delete_management(ot)
self.assertTrue(all((est1 == obs, est2 == obs)))
pass
## def testdocu(self):
## pass
def testmeta(self):
fcws = 'c:\\temp'
tempshp = arcpy.CreateScratchName('tmp.dbf', workspace=fcws).replace('.dbf', '.shp')
fcnm = os.path.basename(tempshp)
# testing entries
ttl,pps,abt = "Bar","example", "Column Spam means eggs"
fc = arcpy.FeatureClassToFeatureClass_conversion(
self.t_fc,
fcws,
fcnm
).getOutput(0)
ap.meta(fc, 'OVERWRITE', title=ttl)
editted = ap.meta(fc, 'append', purpose=pps, abstract=abt)
editted = ap.meta(fc, 'overwrite', title=ttl, purpose=pps, abstract=abt)
retrieved = ap.meta(fc)
ap.dlt(fc)
self.assertEqual(set(editted.values()), set(retrieved.values()))
## def testmsg(self):
## pass
def testfrequency(self):
est = ap.frequency([1,1,2,3,4,4,4])
obs = {1: 2, 2: 1, 3: 1, 4: 3}
samekeys = set(est.keys()) == set(obs.keys())
good = all([samekeys] + [est[i] == obs[i] for i in est])
self.assertTrue(good)
pass
def testlist_environments(self):
envs = ap.list_environments([])
self.assertEqual(len(envs), 50)
pass
def testoidF(self):
est = ap.oidF(self.t_fc)
obs = "OBJECTID"
self.assertEqual(str(est), obs)
pass
def testshpF(self):
est = ap.shpF(self.t_fc)
obs = "Shape"
self.assertEqual(str(est), obs)
pass
def testtstamp(self):
est = []
est.append(len(ap.tstamp()) == len('20140216184029'))
est.append(len(ap.tstamp("lr")) == len('lr20140216184045'))
est.append(len(ap.tstamp("lr", "%H%M%S")) == len('lr184045'))
est.append(len(ap.tstamp("lr", "%H%M%S")) == len('lr184045'))
est.append(len(ap.tstamp("lr", "%H%M%S", s=('run',1))) == len('lr184527_run_1'))
obs = [True, True, True, True, True]
self.assertEqual(est, obs)
pass
def testdlt(self):
est = []
wc = '"OBJECTID" < 11'
lr = arcpy.management.MakeFeatureLayer(self.t_fc, "lr", wc).getOutput(0)
# TODO: test for deleting layers won't pass even though ap.dlt works
#print lr
#print arcpy.Exists(lr)
tempfc = 'in_memory\\tmp'
if arcpy.Exists(tempfc):
arcpy.Delete_management(tempfc)
tmpfc = arcpy.CopyFeatures_management(lr, tempfc).getOutput(0)
tempshp = arcpy.CreateScratchName('tmp.dbf', workspace='c:\\temp').replace('.dbf', '.shp')
fc = arcpy.CopyFeatures_management(tmpfc, tempshp).getOutput(0)
ap.dlt(lr)
est.append(ap.dlt(tmpfc))
est.append(ap.dlt(fc))
est.append(ap.dlt('this does not exist'))
        self.assertEqual(est, [True, True, False])
pass
def testcleanup(self):
x = []
out = arcpy.CreateScratchName("tmp", workspace=arcpy.env.scratchGDB)
x.append(arcpy.management.Copy(self.t_fc, out).getOutput(0))
est = ap.cleanup(x)
obs = 0
self.assertEqual(est, obs)
def testto_points(self):
obs = 10
wc = '"OBJECTID" < ' + str(obs + 1)
ofc = arcpy.CreateScratchName("tmp_out.dbf", workspace="c:\\temp").replace('.dbf', '.shp')
cs = 27700
ptfc = ap.to_points(self.t_fc, ofc, "POP_EST", "GDP_MD_EST", cs, w = wc)
est = int(arcpy.GetCount_management(ptfc).getOutput(0))
arcpy.Delete_management(ptfc)
self.assertEqual(est, obs)
pass
## def testwsp(self):
## pass
##
## def testswsp(self):
## pass
def testto_scratch(self):
est = []
obs = []
arcpy.env.scratchWorkspace = arcpy.env.scratchGDB
s = arcpy.env.scratchWorkspace
est.append(ap.to_scratch('foo', 0))
obs.append(ap.os.path.join(s, 'foo'))
est.append(ap.to_scratch('foo', 1))
obs.append(os.path.join(s, 'foo0'))
est.append(ap.to_scratch('foo.shp', 0))
obs.append(os.path.join(s, 'foo_shp'))
est.append(ap.to_scratch('foo.shp', 1))
obs.append(os.path.join(s, 'foo_shp0'))
# not tested for file based workspaces
arcpy.env.scratchWorkspace = arcpy.env.scratchFolder
a = arcpy.env.scratchWorkspace
ap.to_scratch('foo', 0) == os.path.join(s, 'foo')
ap.to_scratch('foo', 1) == os.path.join(s, 'foo0')
ap.to_scratch('foo.shp', 0) == os.path.join(s, 'foo_shp')
ap.to_scratch('foo.shp', 1) == os.path.join(s, 'foo_shp0')
eq = all([ei == oi for ei,oi in zip(est, obs)])
self.assertTrue(eq)
    def testremap_3d(self):
est = []
remapped = ap.remap_3d(10,50,10)
est.append(remapped == '10 20 1;20 30 2;30 40 3;40 50 4')
remapped = ap.remap_3d(0,5,1)
est.append(remapped == '0 1 1;1 2 2;2 3 3;3 4 4;4 5 5')
remapped = ap.remap_3d(-10,10,5)
est.append(remapped == '-10 -5 1;-5 0 2;0 5 3;5 10 4')
remapped = ap.remap_3d(-10,10,-5)
est.append(remapped == '')
remapped = ap.remap_3d(10,-20,-7)
est.append(remapped == '10 3 1;3 -4 2;-4 -11 3;-11 -18 4;-18 -25 5')
self.assertTrue(all(est))
    def testremap_sa(self):
est = []
remapped = ap.remap_sa(10,50,10)
ob = [[[10, 20], 1], [[20, 30], 2], [[30, 40], 3], [[40, 50], 4]]
est.append(remapped == ob)
remapped = ap.remap_sa(0,5,1)
ob = [[[0, 1], 1], [[1, 2], 2], [[2, 3], 3], [[3, 4], 4], [[4, 5], 5]]
est.append(remapped == ob)
remapped = ap.remap_sa(-10,10,5)
ob = [[[-10, -5], 1], [[-5, 0], 2], [[0, 5], 3], [[5, 10], 4]]
est.append(remapped == ob)
remapped = ap.remap_sa(-10,10,-5)
ob = []
est.append(remapped == ob)
remapped = ap.remap_sa(10,-20,-7)
ob = [
[[10, 3], 1], [[3, -4], 2], [[-4, -11], 3], [[-11, -18], 4],
[[-18, -25], 5]
]
est.append(remapped == ob)
self.assertTrue(all(est))
def testfind(self):
self.testingfolder = os.path.join(os.path.dirname(sys.argv[0]), 'testing')
obs = [1, 5]
est = []
findings = ap.find('*.shp', self.testingfolder)
est.append(len(findings))
findings = ap.find('*110m*', self.testingfolder)
est.append(len(findings))
self.assertEqual(est, obs)
def testfixArgs(self):
list_args = 'C:\Temp\Shapefiles\Contours.shp;C:\Temp\Shapefiles\Contours.shp'
est = ap.fixArgs(list_args, list)
obs = ['C:\\Temp\\Shapefiles\\Contours.shp', 'C:\\Temp\\Shapefiles\\Contours.shp']
self.assertEqual(est, obs)
est = ap.fixArgs('false', bool)
self.assertEqual(est, False)
pass
def testint_to_float(self):
_dir = os.path.join(self.testingfolder, r'testing_files\rasters')
ndvi = os.path.join(_dir, 'dh_july_ndvi')
ob = round(arcpy.Raster(ndvi).maximum, 5)
int_rst = os.path.join(_dir, 'ndvi_int')
est = os.path.join(_dir, 'ndvi_tst')
if arcpy.CheckExtension('Spatial') == 'Available':
arcpy.CheckOutExtension('Spatial')
arcpy.sa.Int(arcpy.sa.Times(ndvi, 1000000)).save(int_rst)
arcpy.CheckInExtension('Spatial')
ap.int_to_float(int_rst, est, 6)
self.assertEqual(ob, round(arcpy.Raster(est).maximum, 5))
for rast in [int_rst, est]:
try:
arcpy.Delete_management(rast)
except:pass
pass
def testfill_no_data(self):
_dir = os.path.join(self.testingfolder, r'testing_files\rasters')
ndvi = os.path.join(_dir, 'dh_july_ndvi')
est = os.path.join(_dir, 'ndvi_fill')
null = os.path.join(_dir, 'null_rst')
if arcpy.CheckExtension('Spatial') == 'Available':
ap.fill_no_data(ndvi, est, 10, 10)
arcpy.CheckOutExtension('Spatial')
arcpy.sa.IsNull(est).save(null)
self.assertEqual(arcpy.Raster(null).maximum, 0)
arcpy.CheckInExtension('Spatial')
for rast in [est, null]:
try:
arcpy.Delete_management(rast)
except:pass
pass
def testmeters_to_feet(self):
_dir = os.path.join(self.testingfolder, r'testing_files\rasters')
dem = os.path.join(_dir, 'dh30m_dem')
est = os.path.join(_dir, 'dem_ft')
ap.meters_to_feet(dem, est)
self.assertEqual(int(arcpy.Raster(est).maximum), 6244)
try:
arcpy.Delete_management(est)
except:
pass
pass
def testcopy_schema(self):
tmp = r'in_memory\schema_test'
ap.copy_schema(self.t_fc, tmp)
self.assertTrue(arcpy.Exists(tmp))
arcpy.Delete_management(tmp)
pass
def testmake_poly_from_extent(self):
desc = arcpy.Describe(self.t_fc2)
ext = desc.extent
sr = desc.spatialReference
est = ap.make_poly_from_extent(ext, sr)
self.assertEqual(str(ext), str(est.extent))
pass
def testlist_all_fcs(self):
est = ap.list_all_fcs(self.testing_gdb, '*', 'All', True)
obs = ['Illinois', 'ne_110m_admin_0_countries']
self.assertEqual(est, obs)
pass
def testfield_list(self):
il = os.path.join(self.testing_gdb, 'Illinois')
est = ap.field_list(il, ['state_fips', 'cnty_fips'])
obs = ['OBJECTID', 'Shape', 'NAME', 'STATE_NAME',
'FIPS', 'Shape_Length', 'Shape_Area']
self.assertEqual(est, obs)
pass
def testget_field_type(self):
il = os.path.join(self.testing_gdb, 'Illinois')
est = ap.get_field_type('NAME', il)
self.assertEqual(est, 'TEXT')
pass
def testmatch_field(self):
fc = os.path.join(self.testing_gdb, 'Illinois')
est = ap.match_field(fc, '*fips', True)
obs = ['STATE_FIPS', 'CNTY_FIPS', 'FIPS']
self.assertEqual(est, obs)
pass
def testadd_fields_from_table(self):
fc = os.path.join(self.testing_gdb, 'Illinois')
copy = fc + '_copy'
if arcpy.Exists(copy):
arcpy.Delete_management(copy)
arcpy.CopyFeatures_management(fc, copy)
flds = ['POP1990', 'POP2000']
tab = fc = os.path.join(self.testing_gdb, 'Illinois_county_info')
ap.add_fields_from_table(copy, tab, flds)
est = [f.name for f in arcpy.ListFields(copy)]
try:
arcpy.Delete_management(copy)
except: pass
for f in flds:
self.assertTrue(f in est)
pass
def testcreate_field_name(self):
fc = os.path.join(self.testing_gdb, 'Illinois')
est = ap.create_field_name(fc, 'NAME')
self.assertEqual(est, 'NAME_1')
pass
def testjoin_using_dict(self):
if arcpy.Exists(r'in_memory\copy'):
arcpy.Delete_management(r'in_memory\copy')
fc = os.path.join(self.testing_gdb, 'Illinois')
copy = fc + '_copy'
if arcpy.Exists(copy):
arcpy.Delete_management(copy)
arcpy.CopyFeatures_management(fc, copy)
flds = ['POP1990', 'POP2000']
tab = fc = os.path.join(self.testing_gdb, 'Illinois_county_info')
ap.join_using_dict(copy, 'CNTY_FIPS', tab, 'CNTY_FIPS', flds)
est = [f.name for f in arcpy.ListFields(copy)]
try:
arcpy.Delete_management(copy)
except: pass
for f in flds:
self.assertTrue(f in est)
pass
def testconcatenate(self):
est = ap.concatenate(['A','B','C'], '-')
self.assertEqual(est, 'A-B-C')
pass
def testconcatenate_fields(self):
if arcpy.Exists(r'in_memory\copy'):
arcpy.Delete_management(r'in_memory\copy')
fc = os.path.join(self.testing_gdb, 'Illinois')
copy = fc + '_copy'
if arcpy.Exists(copy):
arcpy.Delete_management(copy)
arcpy.CopyFeatures_management(fc, copy)
ap.concatenate_fields(copy, 'FULL', 75, ['NAME', 'STATE_NAME'], ' County, ')
obs = 'Jo Daviess County, Illinois'
with arcpy.da.SearchCursor(copy, 'FULL') as rows:
est = rows.next()[0]
del rows
try:
arcpy.Delete_management(copy)
except: pass
self.assertEqual(est, obs)
pass
def testcreate_pie_chart(self):
tab = fc = os.path.join(self.testing_gdb, 'Illinois_county_info')
oid = arcpy.AddFieldDelimiters(tab, arcpy.Describe(tab).OIDFieldName)
where = '{0} < 11'.format(oid)
tv = arcpy.MakeTableView_management(tab, 'IL_table', where)
fig = os.path.join(self.testingfolder, 'IL_county_pop.png')
# will use 'CNTY_FIPS' as case field since our pop field is
# already populated for each county
ap.create_pie_chart(fig, tv, 'NAME','POP2000', 'IL Counties')
self.assertTrue(os.path.exists(fig))
#### try:
#### arcpy.Delete_management(fig) # may want to look at the figure, pretty cool!
#### except:
#### pass
pass
def testcombine_pdfs(self):
_dir = os.path.dirname(self.testingfolder)
mapDoc = os.path.join(_dir, 'chart.mxd')
mxd = arcpy.mapping.MapDocument(mapDoc)
txt_elm = [elm for elm in arcpy.mapping.ListLayoutElements(mxd, 'TEXT_ELEMENT')
if elm.text == 'SomeText'][0]
del_list = []
for i in range(3):
txt_elm.text = "Hi, I'm page {0}".format(i)
pdf = os.path.join(_dir, 'test_{0}.pdf'.format(i))
arcpy.mapping.ExportToPDF(mxd, pdf, resolution=100)
del_list.append(pdf)
combined = os.path.join(_dir, 'combined.pdf')
del mxd
ap.combine_pdfs(combined, del_list)
self.assertTrue(os.path.exists(combined))
del_list.append(combined)
try:
for p in del_list:
arcpy.Delete_management(p)
except:
pass
pass
def testlist_data(self):
"""TODO: Write more tests for listing data"""
expected = ['testing.gdb','testing_files']
data = ap.list_data(self.testingfolder)
datas = str("".join(data))
all_in = all([(ei in datas) for ei in expected])
self.assertTrue(all_in)
def testrequest_text(self):
"""Basic test to get a page as text"""
d = ap.request('http://google.com')
self.assertNotEqual(d, '')
def testrequest_json(self):
"""Get json from arcgis sampleserver"""
u = 'http://sampleserver3.arcgisonline.com/ArcGIS/rest/services'
d = ap.request(u, {'f':'json'}, 'json')
items = [
isinstance(d, dict),
isinstance(d.get('services'), list),
isinstance(d.get('folders'), list),
isinstance(d.get('currentVersion'), int)
]
self.assertTrue(all(items))
def testrequest_xml(self):
"""Get XML from epsg.io"""
u = 'http://epsg.io/4326.xml'
d = ap.request(u, None, 'xml')
tg = str(d.tag)
tp = '{http://www.opengis.net/gml/3.2}GeographicCRS'
self.assertEqual(tg, tp)
def testarctype_to_ptype(self):
"""Converting from ArcGIS type strings to python types"""
self.assertTrue(ap.arctype_to_ptype("SHORT") is int)
self.assertTrue(ap.arctype_to_ptype("Short") is int)
self.assertTrue(ap.arctype_to_ptype("SHORT ") is int)
self.assertTrue(ap.arctype_to_ptype("TEXT") is str)
self.assertTrue(ap.arctype_to_ptype("STRING") is str)
self.assertTrue(ap.arctype_to_ptype("SMALLINTEGER") is int)
self.assertTrue(ap.arctype_to_ptype("LONG") is int)
self.assertTrue(ap.arctype_to_ptype("INTEGER") is int)
self.assertTrue(ap.arctype_to_ptype("DATE") is datetime.datetime)
self.assertTrue(ap.arctype_to_ptype("DATETIME") is datetime.datetime)
self.assertTrue(ap.arctype_to_ptype("FLOAT") is float)
self.assertTrue(ap.arctype_to_ptype("SINGLE") is float)
self.assertTrue(ap.arctype_to_ptype("DOUBLE") is float)
self.assertTrue(ap.arctype_to_ptype("") is str)
self.assertTrue(ap.arctype_to_ptype(None) is str)
with self.assertRaises(Exception):
ap.arctype_to_ptype()
pass
def testproject_coordinates(self):
"""Projecting list of coordinate pairs"""
dtt = 'TM65_To_WGS_1984_2 + OSGB_1936_To_WGS_1984_NGA_7PAR'
coordinates = [(240600.0, 375800.0), (245900.0, 372200.0)]
observed = ap.project_coordinates(coordinates, 29902, 27700, dtt)
expected = [
(53444.10991363949, 539226.5651404626),
(58422.59724314464, 535183.1931399861)
]
self.assertEqual(observed, expected)
pass
if __name__ == '__main__':
unittest.main(verbosity = 2)
| lgpl-3.0 | 1,971,623,132,743,390,700 | 36.004484 | 255 | 0.541121 | false |
olegartys/LabVariant | LabVariant.py | 1 | 2432 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# LabVariant.py
#
# Copyright 2014 olegartys <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from gi.repository import Gtk
class LabVariant :
global text_var_entry
text_var_entry = 92
class Handler :
"""Callback handlers for widgets"""
def onDestroy (self, *args) :
Gtk.main_quit ()
def onEntryChanged (self, obj, window) :
entry = window.entry
label = window.label
text_var_entry = window.var_entry.get_text()
for i in range(len(entry)):
x = entry[i].get_text()
if (len (x) != 0) and (len (text_var_entry) != 0 ) :
label[i].set_text(str((int(text_var_entry) % int(x)) + 1))
else :
label[i].set_text ("Не определено")
def __init__ (self) :
builder = Gtk.Builder ()
builder.add_from_file ("LabVariant.glade")
self.window = builder.get_object ("window1")
self.window.connect ("destroy", self.Handler().onDestroy)
self.window.connect ("delete_event", self.Handler().onDestroy)
# Getting entries and labels form *.glade
self.entry = []
self.label = []
for i in range (3) :
self.entry.append (builder.get_object ("entry0" + str(i)))
self.entry[i].connect ("changed", self.Handler().onEntryChanged, self)
self.label.append (builder.get_object ("label0" + str(i)))
self.label[i].set_text ("Не определено")
self.var_entry = builder.get_object ("var_entry")
self.var_entry.connect ("changed", self.Handler().onEntryChanged, self)
self.var_entry.set_text (str(text_var_entry))
def run (self) :
"""Run program"""
self.window.show_all ()
Gtk.main ()
def main():
app = LabVariant ()
app.run ()
return 0
if __name__ == '__main__':
main()
| gpl-3.0 | -3,682,123,395,968,886,300 | 29.481013 | 73 | 0.667359 | false |
ProgVal/irctest | irctest/patma.py | 1 | 4368 | """Pattern-matching utilities"""
import dataclasses
import re
from typing import Dict, List, Optional, Union
class Operator:
"""Used as a wildcards and operators when matching message arguments
(see assertMessageMatch and match_list)"""
def __init__(self) -> None:
pass
class AnyStr(Operator):
"""Wildcard matching any string"""
def __repr__(self) -> str:
return "AnyStr"
class AnyOptStr(Operator):
"""Wildcard matching any string as well as None"""
def __repr__(self) -> str:
return "AnyOptStr"
@dataclasses.dataclass(frozen=True)
class StrRe(Operator):
regexp: str
def __repr__(self) -> str:
return f"StrRe(r'{self.regexp}')"
@dataclasses.dataclass(frozen=True)
class NotStrRe(Operator):
regexp: str
def __repr__(self) -> str:
return f"NotStrRe(r'{self.regexp}')"
@dataclasses.dataclass(frozen=True)
class RemainingKeys(Operator):
"""Used in a dict pattern to match all remaining keys.
May only be present once."""
key: Operator
def __repr__(self) -> str:
return f"Keys({self.key!r})"
ANYSTR = AnyStr()
"""Singleton, spares two characters"""
ANYDICT = {RemainingKeys(ANYSTR): AnyOptStr()}
"""Matches any dictionary; useful to compare tags dict, eg.
`match_dict(got_tags, {"label": "foo", **ANYDICT})`"""
class _AnyListRemainder:
def __repr__(self) -> str:
return "*ANYLIST"
ANYLIST = [_AnyListRemainder()]
"""Matches any list remainder"""
def match_string(got: Optional[str], expected: Union[str, Operator, None]) -> bool:
if isinstance(expected, AnyOptStr):
return True
elif isinstance(expected, AnyStr) and got is not None:
return True
elif isinstance(expected, StrRe):
if got is None or not re.match(expected.regexp, got):
return False
elif isinstance(expected, NotStrRe):
if got is None or re.match(expected.regexp, got):
return False
elif isinstance(expected, Operator):
raise NotImplementedError(f"Unsupported operator: {expected}")
elif got != expected:
return False
return True
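# Illustrative behaviour (sketch; mirrors the logic implemented above):
#   match_string("PING", "PING")              -> True  (plain equality)
#   match_string("anything", ANYSTR)          -> True  (ANYSTR rejects only None)
#   match_string(None, AnyOptStr())           -> True
#   match_string("001", StrRe(r"[0-9]+"))     -> True  (re.match, anchored at the start)
#   match_string("abc", NotStrRe(r"[0-9]"))   -> True  (no leading digit)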
def match_list(
got: List[Optional[str]], expected: List[Union[str, None, Operator]]
) -> bool:
"""Returns True iff the list are equal.
The ANYSTR operator can be used on the 'expected' side as a wildcard,
matching any *single* value; and StrRe("<regexp>") can be used to match regular
expressions"""
if expected[-1] is ANYLIST[0]:
expected = expected[0:-1]
got = got[0 : len(expected)] # Ignore remaining
if len(got) != len(expected):
return False
return all(
match_string(got_value, expected_value)
for (got_value, expected_value) in zip(got, expected)
)
def match_dict(
got: Dict[str, Optional[str]],
expected: Dict[Union[str, Operator], Union[str, Operator, None]],
) -> bool:
"""Returns True iff the list are equal.
The ANYSTR operator can be used on the 'expected' side as a wildcard,
matching any *single* value; and StrRe("<regexp>") can be used to match regular
expressions
Additionally, the Keys() operator can be used to match remaining keys, and
ANYDICT to match any remaining dict"""
got = dict(got) # shallow copy, as we will remove keys
# Set to not-None if we find a Keys() operator in the dict keys
remaining_keys_wildcard = None
for (expected_key, expected_value) in expected.items():
if isinstance(expected_key, RemainingKeys):
remaining_keys_wildcard = (expected_key.key, expected_value)
elif isinstance(expected_key, Operator):
raise NotImplementedError(f"Unsupported operator: {expected_key}")
else:
if expected_key not in got:
return False
got_value = got.pop(expected_key)
if not match_string(got_value, expected_value):
return False
if remaining_keys_wildcard:
(expected_key, expected_value) = remaining_keys_wildcard
for (key, value) in got.items():
if not match_string(key, expected_key):
return False
if not match_string(value, expected_value):
return False
return True
else:
# There should be nothing left unmatched in the dict
return got == {}
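# Example (sketch): matching message tags where only some keys are pinned down.
# ANYDICT expands to {RemainingKeys(ANYSTR): AnyOptStr()}, so the following
# accepts any extra tags besides "label":
#
#     match_dict({"label": "foo", "time": "2021-01-01T00:00:00Z"},
#                {"label": "foo", **ANYDICT})   # -> True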
| mit | 2,884,978,005,842,988,500 | 27.927152 | 83 | 0.639423 | false |
gnulinooks/sympy | sympy/geometry/util.py | 1 | 4540 |
def intersection(*entities):
"""
    Finds the intersection between a list of GeometryEntity instances. Returns
    a list of all the intersections, and will raise a NotImplementedError
    exception if unable to calculate the intersection.
Examples:
=========
>>> from sympy.geometry import *
>>> p1,p2,p3 = Point(0,0), Point(1,1), Point(-1, 5)
>>> l1, l2 = Line(p1, p2), Line(p3, p2)
>>> c = Circle(p2, 1)
>>> intersection(l1, p2)
[Point(1, 1)]
>>> intersection(l1, l2)
[Point(1, 1)]
>>> intersection(c, p2)
[]
>>> intersection(c, Point(1, 0))
[Point(1, 0)]
>>> intersection(c, l2)
[Point(1 - 5**(1/2)/5, 1 + 2*5**(1/2)/5), Point(1 + 5**(1/2)/5, 1 - 2*5**(1/2)/5)]
Notes:
======
- The intersection of any geometrical entity with itself should return
a list with one item: the entity in question.
- An intersection requires two or more entities. If only a single
entity is given then one will receive an empty intersection list.
- It is possible for intersection() to miss intersections that one
knows exists because the required quantities were not fully
simplified internally.
"""
from entity import GeometryEntity
entities = GeometryEntity.extract_entities(entities, False)
if len(entities) <= 1: return []
res = GeometryEntity.do_intersection(entities[0], entities[1])
for entity in entities[2:]:
newres = []
for x in res:
newres.extend( GeometryEntity.do_intersection(x, entity) )
res = newres
return res
def convex_hull(*args):
"""
Returns a Polygon representing the convex hull of a set of 2D points.
Notes:
======
This can only be performed on a set of non-symbolic points.
Example:
========
>>> from sympy.geometry import Point
>>> points = [ Point(x) for x in [(1,1), (1,2), (3,1), (-5,2), (15,4)] ]
>>> convex_hull(points)
Polygon(Point(3, 1), Point(15, 4), Point(-5, 2), Point(1, 1))
Description of method used:
===========================
See http://en.wikipedia.org/wiki/Graham_scan.
"""
from point import Point
from line import Segment
from polygon import Polygon
p = args[0]
if isinstance(p, Point):
p = args
# Basic checks
if len(p) == 1:
return p[0]
elif len(p) == 2:
return Segment(p[0], p[1])
# Find lowest+rightmost point
m = 0
for i in xrange(1, len(p)):
if (p[i][1] < p[m][1]) or ((p[i][1] == p[m][1]) and (p[i][0] > p[m][0])):
m = i
p[0], p[m] = p[m], p[0]
def tarea(a, b, c):
return (b[0] - a[0])*(c[1] - a[1]) - (c[0] - a[0])*(b[1] - a[1])
# Radial sort of points with respect to p[0] (our pivot)
destroy = {}
p0 = p[0]
def pcompare(p1, p2):
a = tarea(p0, p1, p2)
if a > 0:
return -1
elif a < 0:
return 1
else:
x = abs(p1[0] - p0[0]) - abs(p2[0] - p0[0])
y = abs(p1[1] - p0[1]) - abs(p2[1] - p0[1])
if (x < 0) or (y < 0):
destroy[p1] = True
return -1
elif (x > 0) or (y > 0):
destroy[p2] = True
return 1
else:
destroy[p1] = True
return 0
p = p[1:]
p.sort(pcompare)
p.insert(0, p0)
# Destroy points as found by sorting
for i in xrange(len(p)-1, -1, -1):
if p[i] in destroy:
del p[i]
# Graham scan
def isleft(a, b, c):
return (tarea(a, b, c) > 0)
top = [p[0], p[1]]
i = 2
while i < len(p):
p1 = top[-2]
p2 = top[-1]
if isleft(p1, p2, p[i]):
top.append(p[i])
i += 1
else:
top.pop()
return Polygon(top)
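# Note: the dominant cost of convex_hull above is the radial sort, so the hull
# is computed in O(n log n) time for n input points; the Graham scan itself is
# linear because each point is pushed and popped from the stack at most once.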
def are_similar(e1, e2):
"""
Returns True if e1 and e2 are similar (one can be uniformly scaled to
the other) or False otherwise.
Notes:
======
- If the two objects are equal then they are always similar.
"""
if e1 == e2: return True
try:
return e1.is_similar(e2)
except AttributeError:
try:
return e2.is_similar(e1)
except AttributeError:
n1 = e1.__class__.__name__
n2 = e2.__class__.__name__
raise GeometryError("Cannot test similarity between %s and %s" % (n1, n2))
| bsd-3-clause | -745,098,334,515,555,700 | 28.102564 | 90 | 0.50837 | false |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/core/generic.py | 1 | 265103 | # pylint: disable=W0231,E1101
import collections
import warnings
import operator
import weakref
import gc
import json
import numpy as np
import pandas as pd
from pandas._libs import tslib, lib, properties
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
is_scalar,
is_number,
is_integer, is_bool,
is_bool_dtype,
is_numeric_dtype,
is_datetime64_dtype,
is_timedelta64_dtype,
is_datetime64tz_dtype,
is_list_like,
is_dict_like,
is_re_compilable,
pandas_dtype)
from pandas.core.dtypes.cast import maybe_promote, maybe_upcast_putmask
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.generic import ABCSeries, ABCPanel, ABCDataFrame
from pandas.core.common import (_count_not_none,
_maybe_box_datetimelike, _values_from_object,
AbstractMethodError, SettingWithCopyError,
SettingWithCopyWarning)
from pandas.core.base import PandasObject, SelectionMixin
from pandas.core.index import (Index, MultiIndex, _ensure_index,
InvalidIndexError)
import pandas.core.indexing as indexing
from pandas.core.indexing import maybe_convert_indices
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import PeriodIndex, Period
from pandas.core.internals import BlockManager
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.core.missing as missing
from pandas.io.formats.printing import pprint_thing
from pandas.io.formats.format import format_percentiles, DataFrameFormatter
from pandas.tseries.frequencies import to_offset
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.compat import (map, zip, lzip, lrange, string_types,
isidentifier, set_function_name, cPickle as pkl)
from pandas.core.ops import _align_method_FRAME
import pandas.core.nanops as nanops
from pandas.util._decorators import (Appender, Substitution,
deprecate_kwarg)
from pandas.util._validators import validate_bool_kwarg
from pandas.core import config
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs = dict()
_shared_doc_kwargs = dict(
axes='keywords for axes', klass='NDFrame',
axes_single_arg='int or labels for object',
args_transpose='axes to permute (int or label for object)',
optional_by="""
by : str or list of str
Name or list of names which refer to the axis items.""")
def _single_replace(self, to_replace, method, inplace, limit):
if self.ndim != 1:
raise TypeError('cannot replace {0} with method {1} on a {2}'
.format(to_replace, method, type(self).__name__))
orig_dtype = self.dtype
result = self if inplace else self.copy()
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if values.dtype == orig_dtype and inplace:
return
result = pd.Series(values, index=self.index,
dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result._data)
return
return result
class NDFrame(PandasObject, SelectionMixin):
"""
    N-dimensional analogue of DataFrame. Store multi-dimensional data in a
    size-mutable, labeled data structure.
Parameters
----------
data : BlockManager
axes : list
copy : boolean, default False
"""
_internal_names = ['_data', '_cacher', '_item_cache', '_cache', 'is_copy',
'_subtyp', '_name', '_index', '_default_kind',
'_default_fill_value', '_metadata', '__array_struct__',
'__array_interface__']
_internal_names_set = set(_internal_names)
_accessors = frozenset([])
_deprecations = frozenset(['as_blocks', 'blocks',
'consolidate', 'convert_objects'])
_metadata = []
is_copy = None
def __init__(self, data, axes=None, copy=False, dtype=None,
fastpath=False):
if not fastpath:
if dtype is not None:
data = data.astype(dtype)
elif copy:
data = data.copy()
if axes is not None:
for i, ax in enumerate(axes):
data = data.reindex_axis(ax, axis=i)
object.__setattr__(self, 'is_copy', None)
object.__setattr__(self, '_data', data)
object.__setattr__(self, '_item_cache', {})
def _repr_data_resource_(self):
"""
Not a real Jupyter special repr method, but we use the same
naming convention.
"""
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option('display.max_rows'))
payload = json.loads(data.to_json(orient='table'),
object_pairs_hook=collections.OrderedDict)
return payload
def _validate_dtype(self, dtype):
""" validate the passed dtype """
if dtype is not None:
dtype = pandas_dtype(dtype)
# a compound dtype
if dtype.kind == 'V':
raise NotImplementedError("compound dtypes are not implemented"
" in the {0} constructor"
.format(self.__class__.__name__))
return dtype
def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):
""" passed a manager and a axes dict """
for a, axe in axes.items():
if axe is not None:
mgr = mgr.reindex_axis(axe,
axis=self._get_block_manager_axis(a),
copy=False)
# make a copy if explicitly requested
if copy:
mgr = mgr.copy()
if dtype is not None:
# avoid further copies if we can
if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
mgr = mgr.astype(dtype=dtype)
return mgr
# ----------------------------------------------------------------------
# Construction
@property
def _constructor(self):
"""Used when a manipulation result has the same dimensions as the
original.
"""
raise AbstractMethodError(self)
def __unicode__(self):
# unicode representation based upon iterating over self
# (since, by definition, `PandasContainers` are iterable)
prepr = '[%s]' % ','.join(map(pprint_thing, self))
return '%s(%s)' % (self.__class__.__name__, prepr)
def _dir_additions(self):
""" add the string-like attributes from the info_axis """
additions = set([c for c in self._info_axis
if isinstance(c, string_types) and isidentifier(c)])
return super(NDFrame, self)._dir_additions().union(additions)
@property
    def _constructor_sliced(self):
        """Used when a manipulation result has one dimension less than the
        original, such as DataFrame single columns slicing.
"""
raise AbstractMethodError(self)
@property
    def _constructor_expanddim(self):
        """Used when a manipulation result has one dimension more than the
        original, such as Series.to_frame() and DataFrame.to_panel()
"""
raise NotImplementedError
# ----------------------------------------------------------------------
# Axis
@classmethod
def _setup_axes(cls, axes, info_axis=None, stat_axis=None, aliases=None,
slicers=None, axes_are_reversed=False, build_axes=True,
ns=None):
"""Provide axes setup for the major PandasObjects.
Parameters
----------
axes : the names of the axes in order (lowest to highest)
        info_axis : the axis of the selector dimension (int)
        stat_axis : the axis number for the default stats (int)
aliases : other names for a single axis (dict)
slicers : how axes slice to others (dict)
axes_are_reversed : boolean whether to treat passed axes as
reversed (DataFrame)
build_axes : setup the axis properties (default True)
"""
cls._AXIS_ORDERS = axes
cls._AXIS_NUMBERS = dict((a, i) for i, a in enumerate(axes))
cls._AXIS_LEN = len(axes)
cls._AXIS_ALIASES = aliases or dict()
cls._AXIS_IALIASES = dict((v, k) for k, v in cls._AXIS_ALIASES.items())
cls._AXIS_NAMES = dict(enumerate(axes))
cls._AXIS_SLICEMAP = slicers or None
cls._AXIS_REVERSED = axes_are_reversed
# typ
setattr(cls, '_typ', cls.__name__.lower())
# indexing support
cls._ix = None
if info_axis is not None:
cls._info_axis_number = info_axis
cls._info_axis_name = axes[info_axis]
if stat_axis is not None:
cls._stat_axis_number = stat_axis
cls._stat_axis_name = axes[stat_axis]
# setup the actual axis
if build_axes:
def set_axis(a, i):
setattr(cls, a, properties.AxisProperty(i))
cls._internal_names_set.add(a)
if axes_are_reversed:
m = cls._AXIS_LEN - 1
for i, a in cls._AXIS_NAMES.items():
set_axis(a, m - i)
else:
for i, a in cls._AXIS_NAMES.items():
set_axis(a, i)
# addtl parms
if isinstance(ns, dict):
for k, v in ns.items():
setattr(cls, k, v)
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = dict([(a, self._get_axis(a)) for a in (axes or self._AXIS_ORDERS)])
d.update(kwargs)
return d
@staticmethod
def _construct_axes_dict_from(self, axes, **kwargs):
"""Return an axes dictionary for the passed axes."""
d = dict([(a, ax) for a, ax in zip(self._AXIS_ORDERS, axes)])
d.update(kwargs)
return d
def _construct_axes_dict_for_slice(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = dict([(self._AXIS_SLICEMAP[a], self._get_axis(a))
for a in (axes or self._AXIS_ORDERS)])
d.update(kwargs)
return d
    def _construct_axes_from_arguments(self, args, kwargs, require_all=False):
        """Construct and return axes if supplied in args/kwargs.
        If require_all, raise if all axis arguments are not supplied.
        Return a tuple of (axes, kwargs).
"""
# construct the args
args = list(args)
for a in self._AXIS_ORDERS:
# if we have an alias for this axis
alias = self._AXIS_IALIASES.get(a)
if alias is not None:
if a in kwargs:
if alias in kwargs:
raise TypeError("arguments are mutually exclusive "
"for [%s,%s]" % (a, alias))
continue
if alias in kwargs:
kwargs[a] = kwargs.pop(alias)
continue
# look for a argument by position
if a not in kwargs:
try:
kwargs[a] = args.pop(0)
except IndexError:
if require_all:
raise TypeError("not enough/duplicate arguments "
"specified!")
axes = dict([(a, kwargs.pop(a, None)) for a in self._AXIS_ORDERS])
return axes, kwargs
@classmethod
def _from_axes(cls, data, axes, **kwargs):
# for construction from BlockManager
if isinstance(data, BlockManager):
return cls(data, **kwargs)
else:
if cls._AXIS_REVERSED:
axes = axes[::-1]
d = cls._construct_axes_dict_from(cls, axes, copy=False)
d.update(kwargs)
return cls(data, **d)
def _get_axis_number(self, axis):
axis = self._AXIS_ALIASES.get(axis, axis)
if is_integer(axis):
if axis in self._AXIS_NAMES:
return axis
else:
try:
return self._AXIS_NUMBERS[axis]
except:
pass
raise ValueError('No axis named {0} for object type {1}'
.format(axis, type(self)))
def _get_axis_name(self, axis):
axis = self._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, string_types):
if axis in self._AXIS_NUMBERS:
return axis
else:
try:
return self._AXIS_NAMES[axis]
except:
pass
raise ValueError('No axis named {0} for object type {1}'
.format(axis, type(self)))
def _get_axis(self, axis):
name = self._get_axis_name(axis)
return getattr(self, name)
def _get_block_manager_axis(self, axis):
"""Map the axis to the block_manager axis."""
axis = self._get_axis_number(axis)
if self._AXIS_REVERSED:
m = self._AXIS_LEN - 1
return m - axis
return axis
def _get_axis_resolvers(self, axis):
# index or columns
axis_index = getattr(self, axis)
d = dict()
prefix = axis[0]
for i, name in enumerate(axis_index.names):
if name is not None:
key = level = name
else:
# prefix with 'i' or 'c' depending on the input axis
# e.g., you must do ilevel_0 for the 0th level of an unnamed
# multiiindex
key = '{prefix}level_{i}'.format(prefix=prefix, i=i)
level = i
level_values = axis_index.get_level_values(level)
s = level_values.to_series()
s.index = axis_index
d[key] = s
# put the index/columns itself in the dict
if isinstance(axis_index, MultiIndex):
dindex = axis_index
else:
dindex = axis_index.to_series()
d[axis] = dindex
return d
def _get_index_resolvers(self):
d = {}
for axis_name in self._AXIS_ORDERS:
d.update(self._get_axis_resolvers(axis_name))
return d
@property
def _info_axis(self):
return getattr(self, self._info_axis_name)
@property
def _stat_axis(self):
return getattr(self, self._stat_axis_name)
@property
def shape(self):
"""Return a tuple of axis dimensions"""
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
@property
def axes(self):
"""Return index label(s) of the internal NDFrame"""
# we do it this way because if we have reversed axes, then
# the block manager shows then reversed
return [self._get_axis(a) for a in self._AXIS_ORDERS]
@property
def ndim(self):
"""Number of axes / array dimensions"""
return self._data.ndim
@property
def size(self):
"""number of elements in the NDFrame"""
return np.prod(self.shape)
@property
def _selected_obj(self):
""" internal compat with SelectionMixin """
return self
@property
def _obj_with_exclusions(self):
""" internal compat with SelectionMixin """
return self
def _expand_axes(self, key):
new_axes = []
for k, ax in zip(key, self.axes):
if k not in ax:
if type(k) != ax.dtype.type:
ax = ax.astype('O')
new_axes.append(ax.insert(len(ax), k))
else:
new_axes.append(ax)
return new_axes
_shared_docs['set_axis'] = """Assign desired index to given axis
Parameters
----------
labels: list-like or Index
The values for the new index
axis : int or string, default 0
inplace : boolean, default None
Whether to return a new %(klass)s instance.
            WARNING: inplace=None currently falls back to True, but
in a future version, will default to False. Use inplace=True
explicitly rather than relying on the default.
.. versionadded:: 0.21.0
The signature is make consistent to the rest of the API.
Previously, the "axis" and "labels" arguments were respectively
the first and second positional arguments.
Returns
-------
renamed : %(klass)s or None
An object of same type as caller if inplace=False, None otherwise.
See Also
--------
pandas.NDFrame.rename
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.set_axis(['a', 'b', 'c'], axis=0, inplace=False)
a 1
b 2
c 3
dtype: int64
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.set_axis(['a', 'b', 'c'], axis=0, inplace=False)
A B
a 1 4
b 2 5
c 3 6
>>> df.set_axis(['I', 'II'], axis=1, inplace=False)
I II
0 1 4
1 2 5
2 3 6
>>> df.set_axis(['i', 'ii'], axis=1, inplace=True)
>>> df
i ii
0 1 4
1 2 5
2 3 6
"""
@Appender(_shared_docs['set_axis'] % dict(klass='NDFrame'))
def set_axis(self, labels, axis=0, inplace=None):
if is_scalar(labels):
warnings.warn(
'set_axis now takes "labels" as first argument, and '
'"axis" as named parameter. The old form, with "axis" as '
'first parameter and \"labels\" as second, is still supported '
'but will be deprecated in a future version of pandas.',
FutureWarning, stacklevel=2)
labels, axis = axis, labels
if inplace is None:
warnings.warn(
'set_axis currently defaults to operating inplace.\nThis '
'will change in a future version of pandas, use '
'inplace=True to avoid this warning.',
FutureWarning, stacklevel=2)
inplace = True
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
def _set_axis(self, axis, labels):
self._data.set_axis(axis, labels)
self._clear_item_cache()
_shared_docs['transpose'] = """
Permute the dimensions of the %(klass)s
Parameters
----------
args : %(args_transpose)s
copy : boolean, default False
Make a copy of the underlying data. Mixed-dtype data will
always result in a copy
Examples
--------
>>> p.transpose(2, 0, 1)
>>> p.transpose(2, 0, 1, copy=True)
Returns
-------
y : same as input
"""
@Appender(_shared_docs['transpose'] % _shared_doc_kwargs)
def transpose(self, *args, **kwargs):
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs,
require_all=True)
axes_names = tuple([self._get_axis_name(axes[a])
for a in self._AXIS_ORDERS])
axes_numbers = tuple([self._get_axis_number(axes[a])
for a in self._AXIS_ORDERS])
# we must have unique axes
if len(axes) != len(set(axes)):
raise ValueError('Must specify %s unique axes' % self._AXIS_LEN)
new_axes = self._construct_axes_dict_from(self, [self._get_axis(x)
for x in axes_names])
new_values = self.values.transpose(axes_numbers)
if kwargs.pop('copy', None) or (len(args) and args[-1]):
new_values = new_values.copy()
nv.validate_transpose_for_generic(self, kwargs)
return self._constructor(new_values, **new_axes).__finalize__(self)
def swapaxes(self, axis1, axis2, copy=True):
"""
Interchange axes and swap values axes appropriately
Returns
-------
y : same as input
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k))
for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes).__finalize__(self)
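    # Example (sketch): for a 2-D frame, swapping the two axes matches a
    # transpose of both values and labels, e.g.
    #
    #     df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
    #     df.swapaxes(0, 1)   # same shape and labels as df.T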
def pop(self, item):
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Column label to be popped
Returns
-------
popped : Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
result = self[item]
del self[item]
try:
result._reset_cacher()
except AttributeError:
pass
return result
def squeeze(self, axis=None):
"""
Squeeze length 1 dimensions.
Parameters
----------
axis : None, integer or string axis name, optional
The axis to squeeze if 1-sized.
.. versionadded:: 0.20.0
Returns
-------
scalar if 1-sized, else original object
"""
axis = (self._AXIS_NAMES if axis is None else
(self._get_axis_number(axis),))
try:
return self.iloc[
tuple([0 if i in axis and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes)])]
except:
return self
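    # Example (sketch): only length-1 dimensions are dropped, e.g.
    #
    #     pd.DataFrame({"A": [1.0]}).squeeze()    # -> 1.0 (1x1 frame to scalar)
    #     pd.DataFrame({"A": [1, 2]}).squeeze()   # -> the 'A' column as a Series
    #     pd.Series([1, 2]).squeeze()             # -> unchanged Series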
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : type of caller (new object)
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
"""
axis = self._get_axis_number(axis)
result = self.copy()
labels = result._data.axes[axis]
result._data.set_axis(axis, labels.swaplevel(i, j))
return result
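    # Example (sketch): with a two-level index the default arguments exchange
    # the two innermost levels, e.g.
    #
    #     idx = pd.MultiIndex.from_arrays([['a', 'a'], [1, 2]],
    #                                     names=['outer', 'inner'])
    #     pd.Series([10, 20], index=idx).swaplevel()   # levels become (inner, outer)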
# ----------------------------------------------------------------------
# Rename
# TODO: define separate funcs for DataFrame, Series and Panel so you can
# get completion on keyword arguments.
_shared_docs['rename'] = """
        Alter axes labels. Function / dict values must be
unique (1-to-1). Labels not contained in a dict / Series will be left
as-is. Extra labels listed don't throw an error. Alternatively, change
``Series.name`` with a scalar value (Series only).
Parameters
----------
%(optional_mapper)s
%(axes)s : scalar, list-like, dict-like or function, optional
Scalar or list-like will alter the ``Series.name`` attribute,
and raise on DataFrame or Panel.
dict-like or functions are transformations to apply to
that axis' values
%(optional_axis)s
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
Whether to return a new %(klass)s. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
Returns
-------
renamed : %(klass)s (new object)
See Also
--------
pandas.NDFrame.rename_axis
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
Since ``DataFrame`` doesn't have a ``.name`` attribute,
only mapping-type arguments are allowed.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(2)
Traceback (most recent call last):
...
TypeError: 'int' object is not callable
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
See the :ref:`user guide <basics.rename>` for more.
"""
@Appender(_shared_docs['rename'] % dict(axes='axes keywords for this'
' object', klass='NDFrame',
optional_mapper='',
optional_axis=''))
def rename(self, *args, **kwargs):
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
copy = kwargs.pop('copy', True)
inplace = kwargs.pop('inplace', False)
level = kwargs.pop('level', None)
if kwargs:
raise TypeError('rename() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
if com._count_not_none(*axes.values()) == 0:
raise TypeError('must pass an index to rename')
# renamer function if passed a dict
def _get_rename_function(mapper):
if isinstance(mapper, (dict, ABCSeries)):
def f(x):
if x in mapper:
return mapper[x]
else:
return x
else:
f = mapper
return f
self._consolidate_inplace()
result = self if inplace else self.copy(deep=copy)
# start in the axis order to eliminate too many copies
for axis in lrange(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is None:
continue
f = _get_rename_function(v)
baxis = self._get_block_manager_axis(axis)
if level is not None:
level = self.axes[axis]._get_level_number(level)
result._data = result._data.rename_axis(f, axis=baxis, copy=copy,
level=level)
result._clear_item_cache()
if inplace:
self._update_inplace(result._data)
else:
return result.__finalize__(self)
rename.__doc__ = _shared_docs['rename']
def rename_axis(self, mapper, axis=0, copy=True, inplace=False):
"""Alter the name of the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
axis : int or string, default 0
copy : boolean, default True
Also copy underlying data
inplace : boolean, default False
Returns
-------
renamed : type of caller or None if inplace=True
Notes
-----
Prior to version 0.21.0, ``rename_axis`` could also be used to change
the axis *labels* by passing a mapping or scalar. This behavior is
deprecated and will be removed in a future version. Use ``rename``
instead.
See Also
--------
pandas.Series.rename, pandas.DataFrame.rename
pandas.Index.rename
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename_axis("foo")
A B
foo
0 1 4
1 2 5
2 3 6
>>> df.rename_axis("bar", axis="columns")
bar A B
0 1 4
1 2 5
2 3 6
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
non_mapper = is_scalar(mapper) or (is_list_like(mapper) and not
is_dict_like(mapper))
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
msg = ("Using 'rename_axis' to alter labels is deprecated. "
"Use '.rename' instead")
warnings.warn(msg, FutureWarning, stacklevel=2)
axis = self._get_axis_name(axis)
d = {'copy': copy, 'inplace': inplace}
d[axis] = mapper
return self.rename(**d)
def _set_axis_name(self, name, axis=0, inplace=False):
"""
Alter the name or names of the axis.
Parameters
----------
name : str or list of str
Name for the Index, or list of names for the MultiIndex
axis : int or str
0 or 'index' for the index; 1 or 'columns' for the columns
inplace : bool
whether to modify `self` directly or return a copy
.. versionadded: 0.21.0
Returns
-------
renamed : type of caller or None if inplace=True
See Also
--------
pandas.DataFrame.rename
pandas.Series.rename
pandas.Index.rename
Examples
--------
>>> df._set_axis_name("foo")
A
foo
0 1
1 2
2 3
>>> df.index = pd.MultiIndex.from_product([['A'], ['a', 'b', 'c']])
>>> df._set_axis_name(["bar", "baz"])
A
bar baz
A a 1
b 2
c 3
"""
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
inplace = validate_bool_kwarg(inplace, 'inplace')
renamed = self if inplace else self.copy()
renamed.set_axis(idx, axis=axis, inplace=True)
if not inplace:
return renamed
# ----------------------------------------------------------------------
# Comparisons
def _indexed_same(self, other):
return all([self._get_axis(a).equals(other._get_axis(a))
for a in self._AXIS_ORDERS])
def __neg__(self):
values = _values_from_object(self)
if values.dtype == np.bool_:
arr = operator.inv(values)
else:
arr = operator.neg(values)
return self.__array_wrap__(arr)
def __invert__(self):
try:
arr = operator.inv(_values_from_object(self))
return self.__array_wrap__(arr)
except:
# inv fails with 0 len
if not np.prod(self.shape):
return self
raise
def equals(self, other):
"""
Determines if two NDFrame objects contain the same elements. NaNs in
the same location are considered equal.
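A small illustration (output is indicative; ``pd`` and ``np`` are assumed
to be imported as in the surrounding examples):
>>> pd.Series([1.0, np.nan]).equals(pd.Series([1.0, np.nan]))
True
>>> pd.Series([1, 2]).equals(pd.Series([1.0, 2.0]))
False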
"""
if not isinstance(other, self._constructor):
return False
return self._data.equals(other._data)
# ----------------------------------------------------------------------
# Iteration
def __hash__(self):
raise TypeError('{0!r} objects are mutable, thus they cannot be'
' hashed'.format(self.__class__.__name__))
def __iter__(self):
"""Iterate over infor axis"""
return iter(self._info_axis)
# can we get a better explanation of this?
def keys(self):
"""Get the 'info axis' (see Indexing for more)
This is index for Series, columns for DataFrame and major_axis for
Panel.
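For example (illustrative):
>>> pd.DataFrame({'A': [1], 'B': [2]}).keys()
Index(['A', 'B'], dtype='object')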
"""
return self._info_axis
def iteritems(self):
"""Iterate over (label, values) on info axis
This is index for Series, columns for DataFrame, major_axis for Panel,
and so on.
"""
for h in self._info_axis:
yield h, self[h]
def __len__(self):
"""Returns length of info axis"""
return len(self._info_axis)
def __contains__(self, key):
"""True if the key is in the info axis"""
return key in self._info_axis
@property
def empty(self):
"""True if NDFrame is entirely empty [no items], meaning any of the
axes are of length 0.
Notes
-----
If NDFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
See also
--------
pandas.Series.dropna
pandas.DataFrame.dropna
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
def __nonzero__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
.format(self.__class__.__name__))
__bool__ = __nonzero__
def bool(self):
"""Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or if that
element is not boolean
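A minimal sketch:
>>> pd.Series([True]).bool()
True
>>> pd.Series([False]).bool()
False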
"""
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError("bool cannot act on a non-boolean single element "
"{0}".format(self.__class__.__name__))
self.__nonzero__()
def __abs__(self):
return self.abs()
def __round__(self, decimals=0):
return self.round(decimals)
# ----------------------------------------------------------------------
# Array Interface
def __array__(self, dtype=None):
return _values_from_object(self)
def __array_wrap__(self, result, context=None):
d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
return self._constructor(result, **d).__finalize__(self)
# ideally we would define this to avoid the getattr checks, but
# it is slower
# @property
# def __array_interface__(self):
# """ provide numpy array interface method """
# values = self.values
# return dict(typestr=values.dtype.str,shape=values.shape,data=values)
def to_dense(self):
"""Return dense representation of NDFrame (as opposed to sparse)"""
# compat
return self
# ----------------------------------------------------------------------
# Picklability
def __getstate__(self):
meta = dict((k, getattr(self, k, None)) for k in self._metadata)
return dict(_data=self._data, _typ=self._typ, _metadata=self._metadata,
**meta)
def __setstate__(self, state):
if isinstance(state, BlockManager):
self._data = state
elif isinstance(state, dict):
typ = state.get('_typ')
if typ is not None:
# set in the order of internal names
# to avoid definitional recursion
# e.g. say fill_value needing _data to be
# defined
meta = set(self._internal_names + self._metadata)
for k in list(meta):
if k in state:
v = state[k]
object.__setattr__(self, k, v)
for k, v in state.items():
if k not in meta:
object.__setattr__(self, k, v)
else:
self._unpickle_series_compat(state)
elif isinstance(state[0], dict):
if len(state) == 5:
self._unpickle_sparse_frame_compat(state)
else:
self._unpickle_frame_compat(state)
elif len(state) == 4:
self._unpickle_panel_compat(state)
elif len(state) == 2:
self._unpickle_series_compat(state)
else: # pragma: no cover
# old pickling format, for compatibility
self._unpickle_matrix_compat(state)
self._item_cache = {}
# ----------------------------------------------------------------------
# IO
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular object.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
if config.get_option('display.latex.repr'):
return self.to_latex()
else:
return None
# ----------------------------------------------------------------------
# I/O Methods
_shared_docs['to_excel'] = """
Write %(klass)s to an excel sheet
%(versionadded_to_excel)s
Parameters
----------
excel_writer : string or ExcelWriter object
File path or existing ExcelWriter
sheet_name : string, default 'Sheet1'
Name of sheet which will contain DataFrame
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
upper left cell row to dump data frame
startcol : int, default 0
upper left cell column to dump data frame
engine : string, default None
write engine to use - you can also set this via the options
``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : boolean, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : string, default None
encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : string, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel)
freeze_panes : tuple of integer (length 2), default None
Specifies the one-based bottommost row and rightmost column that
is to be frozen
.. versionadded:: 0.20.0
Notes
-----
If passing an existing ExcelWriter object, then the sheet will be added
to the existing workbook. This can be used to save different
DataFrames to one workbook:
>>> writer = pd.ExcelWriter('output.xlsx')
>>> df1.to_excel(writer,'Sheet1')
>>> df2.to_excel(writer,'Sheet2')
>>> writer.save()
For compatibility with to_csv, to_excel serializes lists and dicts to
strings before writing.
"""
def to_json(self, path_or_buf=None, orient=None, date_format=None,
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None, lines=False, compression=None):
"""
Convert the object to a JSON string.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path_or_buf : the path or buffer to write the result string
if this is None, return the converted string
orient : string
* Series
- default is 'index'
- allowed values are: {'split','records','index'}
* DataFrame
- default is 'columns'
- allowed values are:
{'split','records','index','columns','values'}
* The format of the JSON string
- split : dict like
{index -> [index], columns -> [columns], data -> [values]}
- records : list like
[{column -> value}, ... , {column -> value}]
- index : dict like {index -> {column -> value}}
- columns : dict like {column -> {index -> value}}
- values : just the values array
- table : dict like {'schema': {schema}, 'data': {data}}
describing the data, and the data component is
like ``orient='records'``.
.. versionchanged:: 0.20.0
date_format : {None, 'epoch', 'iso'}
Type of date conversion. `epoch` = epoch milliseconds,
`iso` = ISO8601. The default depends on the `orient`. For
`orient='table'`, the default is `'iso'`. For all other orients,
the default is `'epoch'`.
double_precision : The number of decimal places to use when encoding
floating point values, default 10.
force_ascii : force encoded string to be ASCII, default True.
date_unit : string, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
default_handler : callable, default None
Handler to call if object cannot otherwise be converted to a
suitable format for JSON. Should receive a single argument which is
the object to convert and return a serialisable object.
lines : boolean, default False
If 'orient' is 'records' write out line delimited json format. Will
throw ValueError if incorrect 'orient' since others are not list
like.
.. versionadded:: 0.19.0
compression : {None, 'gzip', 'bz2', 'xz'}
A string representing the compression to use in the output file,
only used when the first argument is a filename
.. versionadded:: 0.21.0
Returns
-------
None or str
If ``path_or_buf`` is None, returns the resulting json format as a
string. Otherwise returns None.
See Also
--------
pd.read_json
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
from pandas.io import json
if date_format is None and orient == 'table':
date_format = 'iso'
elif date_format is None:
date_format = 'epoch'
return json.to_json(path_or_buf=path_or_buf, obj=self, orient=orient,
date_format=date_format,
double_precision=double_precision,
force_ascii=force_ascii, date_unit=date_unit,
default_handler=default_handler,
lines=lines, compression=compression)
def to_hdf(self, path_or_buf, key, **kwargs):
"""Write the contained data to an HDF5 file using HDFStore.
Parameters
----------
path_or_buf : the path (string) or HDFStore object
key : string
identifier for the group in the store
mode : optional, {'a', 'w', 'r+'}, default 'a'
``'w'``
Write; a new file is created (an existing file with the same
name would be deleted).
``'a'``
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
``'r+'``
It is similar to ``'a'``, but the file must already exist.
format : 'fixed(f)|table(t)', default is 'fixed'
fixed(f) : Fixed format
Fast writing/reading. Not-appendable, nor searchable
table(t) : Table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data
append : boolean, default False
For Table formats, append the input data to the existing
data_columns : list of columns, or True, default None
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See `here
<http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__.
Applicable only to format='table'.
complevel : int, 0-9, default None
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum
dropna : boolean, default False.
If true, ALL nan rows will not be written to store.
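Examples
--------
A minimal usage sketch (the file name and key below are arbitrary
examples, not fixed values):
>>> df = pd.DataFrame({'A': [1, 2, 3]})
>>> df.to_hdf('data.h5', key='df', mode='w')
>>> pd.read_hdf('data.h5', 'df')
   A
0  1
1  2
2  3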
"""
from pandas.io import pytables
return pytables.to_hdf(path_or_buf, key, self, **kwargs)
def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs):
"""
msgpack (serialize) object to input file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string File path, buffer-like, or None
if None, return generated string
append : boolean whether to append to an existing msgpack
(default is False)
compress : type of compressor (zlib or blosc), default to None (no
compression)
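A brief sketch (assumes ``pd.read_msgpack`` accepts the packed bytes
returned when no path is given):
>>> df = pd.DataFrame({'A': [1, 2]})
>>> packed = df.to_msgpack()
>>> pd.read_msgpack(packed)
   A
0  1
1  2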
"""
from pandas.io import packers
return packers.to_msgpack(path_or_buf, self, encoding=encoding,
**kwargs)
def to_sql(self, name, con, flavor=None, schema=None, if_exists='fail',
index=True, index_label=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
name : string
Name of SQL table
con : SQLAlchemy engine or DBAPI2 connection (legacy mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
.. deprecated:: 0.19.0
'sqlite' is the only supported option if SQLAlchemy is not
used.
schema : string, default None
Specify the schema (if database flavor supports this). If None, use
default schema.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column.
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : dict of column name to SQL type, default None
Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
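Examples
--------
A minimal sketch using an in-memory SQLite database (``create_engine``
comes from SQLAlchemy; the table name is an arbitrary example):
>>> from sqlalchemy import create_engine
>>> engine = create_engine('sqlite://')
>>> df = pd.DataFrame({'A': [1, 2, 3]})
>>> df.to_sql('my_table', engine, index=False)
>>> pd.read_sql('SELECT * FROM my_table', engine)
   A
0  1
1  2
2  3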
"""
from pandas.io import sql
sql.to_sql(self, name, con, flavor=flavor, schema=schema,
if_exists=if_exists, index=index, index_label=index_label,
chunksize=chunksize, dtype=dtype)
def to_pickle(self, path, compression='infer',
protocol=pkl.HIGHEST_PROTOCOL):
"""
Pickle (serialize) object to input file path.
Parameters
----------
path : string
File path
compression : {'infer', 'gzip', 'bz2', 'xz', None}, default 'infer'
a string representing the compression to use in the output file
.. versionadded:: 0.20.0
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1], paragraph 12.1.2). The possible
values for this parameter depend on the version of Python. For
Python 2.x, possible values are 0, 1, 2. For Python >= 3.0, 3 is a
valid value. For Python >= 3.4, 4 is a valid value. A negative value
for the protocol parameter is equivalent to setting its value to
HIGHEST_PROTOCOL.
.. [1] https://docs.python.org/3/library/pickle.html
.. versionadded:: 0.21.0
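Examples
--------
A round-trip sketch (the file name is an arbitrary example):
>>> original_df = pd.DataFrame({'foo': range(5)})
>>> original_df.to_pickle('./dummy.pkl')
>>> unpickled_df = pd.read_pickle('./dummy.pkl')
>>> unpickled_df
   foo
0    0
1    1
2    2
3    3
4    4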
"""
from pandas.io.pickle import to_pickle
return to_pickle(self, path, compression=compression,
protocol=protocol)
def to_clipboard(self, excel=None, sep=None, **kwargs):
"""
Attempt to write text representation of object to the system clipboard
This can be pasted into Excel, for example.
Parameters
----------
excel : boolean, defaults to True
if True, use the provided separator, writing in a csv
format for allowing easy pasting into excel.
if False, write a string representation of the object
to the clipboard
sep : optional, defaults to tab
other keywords are passed to to_csv
Notes
-----
Requirements for your platform
- Linux: xclip, or xsel (with gtk or PyQt4 modules)
- Windows: none
- OS X: none
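Examples
--------
A brief sketch (the copied text is shown as comments because the result
lives on the system clipboard):
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',')
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6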
"""
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
def to_xarray(self):
"""
Return an xarray object from the pandas object.
Returns
-------
a DataArray for a Series
a Dataset for a DataFrame
a DataArray for higher dims
Examples
--------
>>> df = pd.DataFrame({'A' : [1, 1, 2],
'B' : ['foo', 'bar', 'foo'],
'C' : np.arange(4.,7)})
>>> df
A B C
0 1 foo 4.0
1 1 bar 5.0
2 2 foo 6.0
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (index: 3)
Coordinates:
* index (index) int64 0 1 2
Data variables:
A (index) int64 1 1 2
B (index) object 'foo' 'bar' 'foo'
C (index) float64 4.0 5.0 6.0
>>> df = pd.DataFrame({'A' : [1, 1, 2],
'B' : ['foo', 'bar', 'foo'],
'C' : np.arange(4.,7)}
).set_index(['B','A'])
>>> df
C
B A
foo 1 4.0
bar 1 5.0
foo 2 6.0
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (A: 2, B: 2)
Coordinates:
* B (B) object 'bar' 'foo'
* A (A) int64 1 2
Data variables:
C (B, A) float64 5.0 nan 4.0 6.0
>>> p = pd.Panel(np.arange(24).reshape(4,3,2),
items=list('ABCD'),
major_axis=pd.date_range('20130101', periods=3),
minor_axis=['first', 'second'])
>>> p
<class 'pandas.core.panel.Panel'>
Dimensions: 4 (items) x 3 (major_axis) x 2 (minor_axis)
Items axis: A to D
Major_axis axis: 2013-01-01 00:00:00 to 2013-01-03 00:00:00
Minor_axis axis: first to second
>>> p.to_xarray()
<xarray.DataArray (items: 4, major_axis: 3, minor_axis: 2)>
array([[[ 0, 1],
[ 2, 3],
[ 4, 5]],
[[ 6, 7],
[ 8, 9],
[10, 11]],
[[12, 13],
[14, 15],
[16, 17]],
[[18, 19],
[20, 21],
[22, 23]]])
Coordinates:
* items (items) object 'A' 'B' 'C' 'D'
* major_axis (major_axis) datetime64[ns] 2013-01-01 2013-01-02 2013-01-03 # noqa
* minor_axis (minor_axis) object 'first' 'second'
Notes
-----
See the `xarray docs <http://xarray.pydata.org/en/stable/>`__
"""
try:
import xarray
except ImportError:
# Give a nice error message
raise ImportError("the xarray library is not installed\n"
"you can install via conda\n"
"conda install xarray\n"
"or via pip\n"
"pip install xarray\n")
if self.ndim == 1:
return xarray.DataArray.from_series(self)
elif self.ndim == 2:
return xarray.Dataset.from_dataframe(self)
# > 2 dims
coords = [(a, self._get_axis(a)) for a in self._AXIS_ORDERS]
return xarray.DataArray(self,
coords=coords,
)
_shared_docs['to_latex'] = r"""
Render an object to a tabular environment table. You can splice
this into a LaTeX document. Requires \\usepackage{booktabs}.
.. versionchanged:: 0.20.2
Added to Series
`to_latex`-specific options:
bold_rows : boolean, default False
Make the row labels bold in the output
column_format : str, default None
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g 'rcl' for 3
columns
longtable : boolean, default will be read from the pandas config module
Default: False.
Use a longtable environment instead of tabular. Requires adding
a \\usepackage{longtable} to your LaTeX preamble.
escape : boolean, default will be read from the pandas config module
Default: True.
When set to False prevents from escaping latex special
characters in column names.
encoding : str, default None
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
decimal : string, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
.. versionadded:: 0.18.0
multicolumn : boolean, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module.
.. versionadded:: 0.20.0
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
The default will be read from the config module.
.. versionadded:: 0.20.0
multirow : boolean, default False
Use \multirow to enhance MultiIndex rows.
Requires adding a \\usepackage{multirow} to your LaTeX preamble.
Will print centered labels (instead of top-aligned)
across the contained rows, separating groups via clines.
The default will be read from the pandas config module.
.. versionadded:: 0.20.0
"""
@Substitution(header='Write out the column names. If a list of strings '
'is given, it is assumed to be aliases for the '
'column names.')
@Appender(_shared_docs['to_latex'] % _shared_doc_kwargs)
def to_latex(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='NaN', formatters=None, float_format=None,
sparsify=None, index_names=True, bold_rows=False,
column_format=None, longtable=None, escape=None,
encoding=None, decimal='.', multicolumn=None,
multicolumn_format=None, multirow=None):
# Get defaults from the pandas config
if self.ndim == 1:
self = self.to_frame()
if longtable is None:
longtable = config.get_option("display.latex.longtable")
if escape is None:
escape = config.get_option("display.latex.escape")
if multicolumn is None:
multicolumn = config.get_option("display.latex.multicolumn")
if multicolumn_format is None:
multicolumn_format = config.get_option(
"display.latex.multicolumn_format")
if multirow is None:
multirow = config.get_option("display.latex.multirow")
formatter = DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
header=header, index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape, decimal=decimal)
formatter.to_latex(column_format=column_format, longtable=longtable,
encoding=encoding, multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow)
if buf is None:
return formatter.buf.getvalue()
# ----------------------------------------------------------------------
# Fancy Indexing
@classmethod
def _create_indexer(cls, name, indexer):
"""Create an indexer like _name in the class."""
if getattr(cls, name, None) is None:
iname = '_%s' % name
setattr(cls, iname, None)
def _indexer(self):
i = getattr(self, iname)
if i is None:
i = indexer(self, name)
setattr(self, iname, i)
return i
setattr(cls, name, property(_indexer, doc=indexer.__doc__))
# add to our internal names set
cls._internal_names_set.add(iname)
def get(self, key, default=None):
"""
Get item from object for given key (DataFrame column, Panel slice,
etc.). Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : type of items contained in object
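Examples
--------
A small illustration (output is indicative):
>>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
>>> df.get('A')
0    1
1    2
Name: A, dtype: int64
>>> df.get('C', default='missing')
'missing'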
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def __getitem__(self, item):
return self._get_item_cache(item)
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res.is_copy = self.is_copy
return res
def _set_as_cached(self, item, cacher):
"""Set the _cacher attribute on the calling object with a weakref to
cacher.
"""
self._cacher = (item, weakref.ref(cacher))
def _reset_cacher(self):
"""Reset the cacher."""
if hasattr(self, '_cacher'):
del self._cacher
def _iget_item_cache(self, item):
"""Return the cached item, item represents a positional indexer."""
ax = self._info_axis
if ax.is_unique:
lower = self._get_item_cache(ax[item])
else:
lower = self._take(item, axis=self._info_axis_number,
convert=True)
return lower
def _box_item_values(self, key, values):
raise AbstractMethodError(self)
def _maybe_cache_changed(self, item, value):
"""The object has called back to us saying maybe it has changed.
"""
self._data.set(item, value, check=False)
@property
def _is_cached(self):
"""Return boolean indicating if self is cached or not."""
return getattr(self, '_cacher', None) is not None
def _get_cacher(self):
"""return my cacher or None"""
cacher = getattr(self, '_cacher', None)
if cacher is not None:
cacher = cacher[1]()
return cacher
@property
def _is_view(self):
"""Return boolean indicating if self is view of another array """
return self._data.is_view
def _maybe_update_cacher(self, clear=False, verify_is_copy=True):
"""
See if we need to update our parent cacher; if ``clear``, then clear
our cache.
Parameters
----------
clear : boolean, default False
clear the item cache
verify_is_copy : boolean, default True
provide is_copy checks
"""
cacher = getattr(self, '_cacher', None)
if cacher is not None:
ref = cacher[1]()
# we are trying to reference a dead referent, hence
# a copy
if ref is None:
del self._cacher
else:
try:
ref._maybe_cache_changed(cacher[0], self)
except:
pass
if verify_is_copy:
self._check_setitem_copy(stacklevel=5, t='referant')
if clear:
self._clear_item_cache()
def _clear_item_cache(self, i=None):
if i is not None:
self._item_cache.pop(i, None)
else:
self._item_cache.clear()
def _slice(self, slobj, axis=0, kind=None):
"""
Construct a slice of this container.
kind parameter is maintained for compatibility with Series slicing.
"""
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._data.get_slice(slobj, axis=axis))
result = result.__finalize__(self)
# this could be a view
# but only in a single-dtyped view slicable case
is_copy = axis != 0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result
def _set_item(self, key, value):
self._data.set(key, value)
self._clear_item_cache()
def _set_is_copy(self, ref=None, copy=True):
if not copy:
self.is_copy = None
else:
if ref is not None:
self.is_copy = weakref.ref(ref)
else:
self.is_copy = None
def _check_is_chained_assignment_possible(self):
"""
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
Should be called just prior to setting a value.
Returns True if we are a view and are cached (a single-dtype slice),
meaning that the cacher should be updated following the setting;
otherwise returns False.
"""
if self._is_view and self._is_cached:
ref = self._get_cacher()
if ref is not None and ref._is_mixed_type:
self._check_setitem_copy(stacklevel=4, t='referant',
force=True)
return True
elif self.is_copy:
self._check_setitem_copy(stacklevel=4, t='referant')
return False
def _check_setitem_copy(self, stacklevel=4, t='setting', force=False):
"""
Parameters
----------
stacklevel : integer, default 4
the level of the stack to show when the error is output
t : string, the type of setting error
force : boolean, default False
if True, then force showing an error
Validate if we are doing a setitem on a chained copy.
If you call this function, be sure to set the stacklevel such that the
user will see the error *at the level of setting*
It is technically possible to figure out that we are setting on
a copy even WITH a multi-dtyped pandas object. In other words, some
blocks may be views while others are not. Currently _is_view will ALWAYS
return False for multi-blocks to avoid having to handle this case.
df = DataFrame(np.arange(0,9), columns=['count'])
df['group'] = 'b'
# This technically need not raise SettingWithCopy if both are views
# (which is not generally guaranteed but is usually True). However,
# this is in general not a good practice and we recommend using .loc.
df.iloc[0:5]['group'] = 'a'
"""
if force or self.is_copy:
value = config.get_option('mode.chained_assignment')
if value is None:
return
# see if the copy is not actually referenced; if so, then dissolve
# the copy weakref
try:
gc.collect(2)
if not gc.get_referents(self.is_copy()):
self.is_copy = None
return
except:
pass
# we might be a false positive
try:
if self.is_copy().shape == self.shape:
self.is_copy = None
return
except:
pass
# a custom message
if isinstance(self.is_copy, string_types):
t = self.is_copy
elif t == 'referant':
t = ("\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame\n\n"
"See the caveats in the documentation: "
"http://pandas.pydata.org/pandas-docs/stable/"
"indexing.html#indexing-view-versus-copy"
)
else:
t = ("\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame.\n"
"Try using .loc[row_indexer,col_indexer] = value "
"instead\n\nSee the caveats in the documentation: "
"http://pandas.pydata.org/pandas-docs/stable/"
"indexing.html#indexing-view-versus-copy"
)
if value == 'raise':
raise SettingWithCopyError(t)
elif value == 'warn':
warnings.warn(t, SettingWithCopyWarning, stacklevel=stacklevel)
def __delitem__(self, key):
"""
Delete item
"""
deleted = False
maybe_shortcut = False
if hasattr(self, 'columns') and isinstance(self.columns, MultiIndex):
try:
maybe_shortcut = key not in self.columns._engine
except TypeError:
pass
if maybe_shortcut:
# Allow shorthand to delete all columns whose first len(key)
# elements match key:
if not isinstance(key, tuple):
key = (key, )
for col in self.columns:
if isinstance(col, tuple) and col[:len(key)] == key:
del self[col]
deleted = True
if not deleted:
# If the above loop ran and didn't delete anything because
# there was no match, this call should raise the appropriate
# exception:
self._data.delete(key)
# delete from the caches
try:
del self._item_cache[key]
except KeyError:
pass
_shared_docs['_take'] = """
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
This is the internal version of ``.take()`` and will contain a wider
selection of parameters useful for internal use but not as suitable
for public usage.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : int, default 0
The axis on which to select elements. "0" means that we are
selecting rows, "1" means that we are selecting columns, etc.
convert : bool, default True
Whether to convert negative indices into positive ones.
For example, ``-1`` would map to the ``len(axis) - 1``.
The conversions are similar to the behavior of indexing a
regular Python list.
is_copy : bool, default True
Whether to return a copy of the original object or not.
Returns
-------
taken : type of caller
An array-like containing the elements taken from the object.
See Also
--------
numpy.ndarray.take
numpy.take
"""
@Appender(_shared_docs['_take'])
def _take(self, indices, axis=0, convert=True, is_copy=True):
self._consolidate_inplace()
if convert:
indices = maybe_convert_indices(indices, len(self._get_axis(axis)))
new_data = self._data.take(indices,
axis=self._get_block_manager_axis(axis),
verify=True)
result = self._constructor(new_data).__finalize__(self)
# Maybe set copy if we didn't actually change the index.
if is_copy:
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
_shared_docs['take'] = """
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : int, default 0
The axis on which to select elements. "0" means that we are
selecting rows, "1" means that we are selecting columns, etc.
convert : bool, default True
.. deprecated:: 0.21.0
In the future, negative indices will always be converted.
Whether to convert negative indices into positive ones.
For example, ``-1`` would map to the ``len(axis) - 1``.
The conversions are similar to the behavior of indexing a
regular Python list.
is_copy : bool, default True
Whether to return a copy of the original object or not.
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
('parrot', 'bird', 24.0),
('lion', 'mammal', 80.5),
('monkey', 'mammal', np.nan)],
columns=('name', 'class', 'max_speed'),
index=[0, 2, 3, 1])
>>> df
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.
>>> df.take([0, 3])
name class max_speed
0 falcon bird 389.0
1 monkey mammal NaN
Take elements at indices 1 and 2 along the axis 1 (column selection).
>>> df.take([1, 2], axis=1)
class max_speed
0 bird 389.0
2 bird 24.0
3 mammal 80.5
1 mammal NaN
We may also take elements using negative integers, which count from
the end of the object, just like with Python lists.
>>> df.take([-1, -2])
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
Returns
-------
taken : type of caller
An array-like containing the elements taken from the object.
See Also
--------
numpy.ndarray.take
numpy.take
"""
@Appender(_shared_docs['take'])
def take(self, indices, axis=0, convert=None, is_copy=True, **kwargs):
if convert is not None:
msg = ("The 'convert' parameter is deprecated "
"and will be removed in a future version.")
warnings.warn(msg, FutureWarning, stacklevel=2)
else:
convert = True
convert = nv.validate_take(tuple(), kwargs)
return self._take(indices, axis=axis, convert=convert, is_copy=is_copy)
def xs(self, key, axis=0, level=None, drop_level=True):
"""
Returns a cross-section (row(s) or column(s)) from the
Series/DataFrame. Defaults to cross-section on the rows (axis=0).
Parameters
----------
key : object
Some label contained in the index, or partially in a MultiIndex
axis : int, default 0
Axis to retrieve cross-section on
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred by label or position.
drop_level : boolean, default True
If False, returns object with same levels as self.
Examples
--------
>>> df
A B C
a 4 5 2
b 4 0 9
c 9 7 3
>>> df.xs('a')
A 4
B 5
C 2
Name: a
>>> df.xs('C', axis=1)
a 2
b 9
c 3
Name: C
>>> df
A B C D
first second third
bar one 1 4 1 8 9
two 1 7 5 5 0
baz one 1 6 6 8 0
three 2 5 3 5 3
>>> df.xs(('baz', 'three'))
A B C D
third
2 5 3 5 3
>>> df.xs('one', level=1)
A B C D
first third
bar 1 4 1 8 9
baz 1 6 6 8 0
>>> df.xs(('baz', 2), level=[0, 'third'])
A B C D
second
three 5 3 5 3
Returns
-------
xs : Series or DataFrame
Notes
-----
xs is only for getting, not setting values.
MultiIndex Slicers is a generic way to get/set values on any level or
levels. It is a superset of xs functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level is not None:
loc, new_ax = labels.get_loc_level(key, level=level,
drop_level=drop_level)
# create the tuple of the indexer
indexer = [slice(None)] * self.ndim
indexer[axis] = loc
indexer = tuple(indexer)
result = self.iloc[indexer]
setattr(result, result._get_axis_name(axis), new_ax)
return result
if axis == 1:
return self[key]
self._consolidate_inplace()
index = self.index
if isinstance(index, MultiIndex):
loc, new_index = self.index.get_loc_level(key,
drop_level=drop_level)
else:
loc = self.index.get_loc(key)
if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
inds, = loc.nonzero()
return self._take(inds, axis=axis, convert=False)
else:
return self._take(loc, axis=axis, convert=True)
if not is_scalar(loc):
new_index = self.index[loc]
if is_scalar(loc):
new_values = self._data.fast_xs(loc)
# may need to box a datelike-scalar
#
# if we encounter an array-like and we only have 1 dim
# that means there are list/ndarrays inside the Series!
# so just return them (GH 6394)
if not is_list_like(new_values) or self.ndim == 1:
return _maybe_box_datetimelike(new_values)
result = self._constructor_sliced(
new_values, index=self.columns,
name=self.index[loc], dtype=new_values.dtype)
else:
result = self.iloc[loc]
result.index = new_index
# this could be a view
# but only in a single-dtyped view slicable case
result._set_is_copy(self, copy=not result._is_view)
return result
_xs = xs
def select(self, crit, axis=0):
"""
Return data corresponding to axis labels matching criteria
DEPRECATED: use df.loc[df.index.map(crit)] to select via labels
Parameters
----------
crit : function
To be called on each index (label). Should return True or False
axis : int
Returns
-------
selection : type of caller
"""
warnings.warn("'select' is deprecated and will be removed in a "
"future release. You can use "
".loc[labels.map(crit)] as a replacement",
FutureWarning, stacklevel=2)
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis_values = self._get_axis(axis)
if len(axis_values) > 0:
new_axis = axis_values[
np.asarray([bool(crit(label)) for label in axis_values])]
else:
new_axis = axis_values
return self.reindex(**{axis_name: new_axis})
def reindex_like(self, other, method=None, copy=True, limit=None,
tolerance=None):
"""Return an object with matching indices to myself.
Parameters
----------
other : Object
method : string or None
copy : boolean, default True
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between labels of the other object and this
object for inexact matches. Can be list-like.
.. versionadded:: 0.17.0
.. versionadded:: 0.21.0 (list-like tolerance)
Notes
-----
Like calling s.reindex(index=other.index, columns=other.columns,
method=...)
Returns
-------
reindexed : same as input
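Examples
--------
A small illustration (output is indicative):
>>> df1 = pd.DataFrame({'A': [1, 2, 3]}, index=['a', 'b', 'c'])
>>> df2 = pd.DataFrame({'A': [10, 20]}, index=['c', 'a'])
>>> df2.reindex_like(df1)
      A
a  20.0
b   NaN
c  10.0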
"""
d = other._construct_axes_dict(axes=self._AXIS_ORDERS, method=method,
copy=copy, limit=limit,
tolerance=tolerance)
return self.reindex(**d)
def drop(self, labels=None, axis=0, index=None, columns=None, level=None,
inplace=False, errors='raise'):
"""
Return new object with labels in requested axis removed.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : int or axis name
Whether to drop labels from the index (0 / 'index') or
columns (1 / 'columns').
index, columns : single label or list-like
Alternative to specifying `axis` (``labels, axis=1`` is
equivalent to ``columns=labels``).
.. versionadded:: 0.21.0
level : int or level name, default None
For MultiIndex
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
dropped : type of caller
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3,4),
columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Notes
-----
Specifying both `labels` and `index` or `columns` will raise a
ValueError.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and "
"'index'/'columns'")
axis_name = self._get_axis_name(axis)
axes = {axis_name: labels}
elif index is not None or columns is not None:
axes, _ = self._construct_axes_from_arguments((index, columns), {})
else:
raise ValueError("Need to specify at least one of 'labels', "
"'index' or 'columns'")
obj = self
for axis, labels in axes.items():
if labels is not None:
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
if inplace:
self._update_inplace(obj)
else:
return obj
def _drop_axis(self, labels, axis, level=None, errors='raise'):
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis, axis_ = self._get_axis(axis), axis
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
dropped = self.reindex(**{axis_name: new_axis})
try:
dropped.axes[axis_].set_names(axis.names, inplace=True)
except AttributeError:
pass
result = dropped
else:
labels = _ensure_object(com._index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError('axis must be a MultiIndex')
indexer = ~axis.get_level_values(level).isin(labels)
else:
indexer = ~axis.isin(labels)
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result
def _update_inplace(self, result, verify_is_copy=True):
"""
Replace self internals with result.
Parameters
----------
verify_is_copy : boolean, default True
provide is_copy checks
"""
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._data = getattr(result, '_data', result)
self._maybe_update_cacher(verify_is_copy=verify_is_copy)
def add_prefix(self, prefix):
"""
Concatenate prefix string with panel item names.
Parameters
----------
prefix : string
Returns
-------
with_prefix : type of caller
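Examples
--------
A minimal illustration:
>>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
>>> df.add_prefix('col_')
   col_A  col_B
0      1      3
1      2      4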
"""
new_data = self._data.add_prefix(prefix)
return self._constructor(new_data).__finalize__(self)
def add_suffix(self, suffix):
"""
Concatenate suffix string with panel item names.
Parameters
----------
suffix : string
Returns
-------
with_suffix : type of caller
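Examples
--------
A minimal illustration:
>>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]})
>>> df.add_suffix('_col')
   A_col  B_col
0      1      3
1      2      4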
"""
new_data = self._data.add_suffix(suffix)
return self._constructor(new_data).__finalize__(self)
_shared_docs['sort_values'] = """
Sort by the values along either axis
.. versionadded:: 0.17.0
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to direct sorting
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
if True, perform operation in-place
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also ``numpy.sort`` for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end
Returns
-------
sorted_obj : %(klass)s
Examples
--------
>>> df = pd.DataFrame({
... 'col1' : ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2' : [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... })
>>> df
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
3 NaN 8 4
4 D 7 2
5 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
3 NaN 8 4
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3
3 NaN 8 4
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
"""
def sort_values(self, by=None, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
"""
NOT IMPLEMENTED: do not call this method, as sorting values is not
supported for Panel objects and will raise an error.
"""
raise NotImplementedError("sort_values has not been implemented "
"on Panel or Panel4D objects.")
_shared_docs['sort_index'] = """
Sort object by labels (along an axis)
Parameters
----------
axis : %(axes)s to direct sorting
level : int or level name or list of ints or list of level names
if not None, sort on values in specified index level(s)
ascending : boolean, default True
Sort ascending vs. descending
inplace : bool, default False
if True, perform operation in-place
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also ``numpy.sort`` for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
`first` puts NaNs at the beginning, `last` puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
if true and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level
Returns
-------
sorted_obj : %(klass)s
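Examples
--------
A small illustration with a Series (output is indicative):
>>> s = pd.Series([1, 2, 3], index=['c', 'a', 'b'])
>>> s.sort_index()
a    2
b    3
c    1
dtype: int64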
"""
@Appender(_shared_docs['sort_index'] % dict(axes="axes", klass="NDFrame"))
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True):
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
labels = self._get_axis(axis)
if level is not None:
raise NotImplementedError("level is not implemented")
if inplace:
raise NotImplementedError("inplace is not implemented")
sort_index = labels.argsort()
if not ascending:
sort_index = sort_index[::-1]
new_axis = labels.take(sort_index)
return self.reindex(**{axis_name: new_axis})
_shared_docs['reindex'] = """
Conform %(klass)s to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
copy=False
Parameters
----------
%(optional_labels)s
%(axes)s : array-like, optional (should be specified using keywords)
New labels / index to conform to. Preferably an Index object to
avoid duplicating data
%(optional_axis)s
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}, optional
method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* default: don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : boolean, default True
Return a new object, even if the passed indexes are the same
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value
limit : int, default None
Maximum number of consecutive elements to forward or backward fill
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.17.0
.. versionadded:: 0.21.0 (list-like tolerance)
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = pd.DataFrame({
... 'http_status': [200,200,404,404,301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index)
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index)
http_status response_time
Safari 404.0 0.07
Iceweasel NaN NaN
Comodo Dragon NaN NaN
IE10 404.0 0.08
Chrome 200.0 0.02
We can fill in the missing values by passing a value to
the keyword ``fill_value``. Because the index is not monotonically
increasing or decreasing, we cannot use arguments to the keyword
``method`` to fill the ``NaN`` values.
>>> df.reindex(new_index, fill_value=0)
http_status response_time
Safari 404 0.07
Iceweasel 0 0.00
Comodo Dragon 0 0.00
IE10 404 0.08
Chrome 200 0.02
>>> df.reindex(new_index, fill_value='missing')
http_status response_time
Safari 404 0.07
Iceweasel missing missing
Comodo Dragon missing missing
IE10 404 0.08
Chrome 200 0.02
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent'])
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns")
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2
prices
2010-01-01 100
2010-01-02 101
2010-01-03 NaN
2010-01-04 100
2010-01-05 89
2010-01-06 88
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2)
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100
2010-01-02 101
2010-01-03 NaN
2010-01-04 100
2010-01-05 89
2010-01-06 88
2010-01-07 NaN
The index entries that did not have a value in the original data frame
(for example, '2009-12-29') are by default filled with ``NaN``.
If desired, we can fill in the missing values using one of several
options.
For example, to fill the ``NaN`` values with the next valid observation
(back-fill), pass ``bfill`` as an argument to the ``method`` keyword.
>>> df2.reindex(date_index2, method='bfill')
prices
2009-12-29 100
2009-12-30 100
2009-12-31 100
2010-01-01 100
2010-01-02 101
2010-01-03 NaN
2010-01-04 100
2010-01-05 89
2010-01-06 88
2010-01-07 NaN
Please note that the ``NaN`` value present in the original dataframe
(at index value 2010-01-03) will not be filled by any of the
value propagation schemes. This is because filling while reindexing
does not look at dataframe values, but only compares the original and
desired indexes. If you do want to fill in the ``NaN`` values present
in the original dataframe, use the ``fillna()`` method.
See the :ref:`user guide <basics.reindexing>` for more.
Returns
-------
reindexed : %(klass)s
"""
# TODO: Decide if we care about having different examples for different
# kinds
@Appender(_shared_docs['reindex'] % dict(axes="axes", klass="NDFrame",
optional_labels="",
optional_axis=""))
def reindex(self, *args, **kwargs):
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
method = missing.clean_reindex_fill_method(kwargs.pop('method', None))
level = kwargs.pop('level', None)
copy = kwargs.pop('copy', True)
limit = kwargs.pop('limit', None)
tolerance = kwargs.pop('tolerance', None)
fill_value = kwargs.pop('fill_value', np.nan)
# Series.reindex doesn't use / need the axis kwarg
# We pop and ignore it here, to make writing Series/Frame generic code
# easier
kwargs.pop("axis", None)
if kwargs:
raise TypeError('reindex() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
self._consolidate_inplace()
# if all axes that are requested to reindex are equal, then only copy
# if indicated must have index names equal here as well as values
if all([self._get_axis(axis).identical(ax)
for axis, ax in axes.items() if ax is not None]):
if copy:
return self.copy()
return self
# check if we are a multi reindex
if self._needs_reindex_multi(axes, method, level):
try:
return self._reindex_multi(axes, copy, fill_value)
except:
pass
# perform the reindex on the axes
return self._reindex_axes(axes, level, limit, tolerance, method,
fill_value, copy).__finalize__(self)
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value,
copy):
"""Perform the reindex for all the axes."""
obj = self
for a in self._AXIS_ORDERS:
labels = axes[a]
if labels is None:
continue
ax = self._get_axis(a)
new_index, indexer = ax.reindex(labels, level=level, limit=limit,
tolerance=tolerance, method=method)
axis = self._get_axis_number(a)
obj = obj._reindex_with_indexers({axis: [new_index, indexer]},
fill_value=fill_value,
copy=copy, allow_dups=False)
return obj
def _needs_reindex_multi(self, axes, method, level):
"""Check if we do need a multi reindex."""
return ((com._count_not_none(*axes.values()) == self._AXIS_LEN) and
method is None and level is None and not self._is_mixed_type)
def _reindex_multi(self, axes, copy, fill_value):
return NotImplemented
_shared_docs[
'reindex_axis'] = ("""Conform input object to new index with optional
filling logic, placing NA/NaN in locations having no value in the
previous index. A new object is produced unless the new index is
equivalent to the current one and copy=False
Parameters
----------
labels : array-like
New labels / index to conform to. Preferably an Index object to
avoid duplicating data
axis : %(axes_single_arg)s
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}, optional
Method to use for filling holes in reindexed DataFrame:
* default: don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : boolean, default True
Return a new object, even if the passed indexes are the same
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
limit : int, default None
Maximum number of consecutive elements to forward or backward fill
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.17.0
.. versionadded:: 0.21.0 (list-like tolerance)
Examples
--------
>>> df.reindex_axis(['A', 'B', 'C'], axis=1)
See Also
--------
reindex, reindex_like
Returns
-------
reindexed : %(klass)s
""")
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
msg = ("'.reindex_axis' is deprecated and will be removed in a future "
"version. Use '.reindex' instead.")
self._consolidate_inplace()
axis_name = self._get_axis_name(axis)
axis_values = self._get_axis(axis_name)
method = missing.clean_reindex_fill_method(method)
warnings.warn(msg, FutureWarning, stacklevel=3)
new_index, indexer = axis_values.reindex(labels, method, level,
limit=limit)
return self._reindex_with_indexers({axis: [new_index, indexer]},
fill_value=fill_value, copy=copy)
def _reindex_with_indexers(self, reindexers, fill_value=np.nan, copy=False,
allow_dups=False):
"""allow_dups indicates an internal call here """
# reindex doing multiple operations on different axes if indicated
new_data = self._data
for axis in sorted(reindexers.keys()):
index, indexer = reindexers[axis]
baxis = self._get_block_manager_axis(axis)
if index is None:
continue
index = _ensure_index(index)
if indexer is not None:
indexer = _ensure_int64(indexer)
# TODO: speed up on homogeneous DataFrame objects
new_data = new_data.reindex_indexer(index, indexer, axis=baxis,
fill_value=fill_value,
allow_dups=allow_dups,
copy=copy)
if copy and new_data is self._data:
new_data = new_data.copy()
return self._constructor(new_data).__finalize__(self)
def _reindex_axis(self, new_index, fill_method, axis, copy):
new_data = self._data.reindex_axis(new_index, axis=axis,
method=fill_method, copy=copy)
if new_data is self._data and not copy:
return self
else:
return self._constructor(new_data).__finalize__(self)
def filter(self, items=None, like=None, regex=None, axis=None):
"""
Subset rows or columns of dataframe according to labels in
the specified index.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
            List of info axis to restrict to (labels need not all be present)
like : string
Keep info axis where "arg in col == True"
regex : string (regular expression)
Keep info axis with re.search(regex, col) == True
axis : int or string axis name
The axis to filter on. By default this is the info axis,
'index' for Series, 'columns' for DataFrame
Returns
-------
same type as input object
Examples
--------
>>> df
one two three
mouse 1 2 3
rabbit 4 5 6
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
See Also
--------
pandas.DataFrame.loc
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
"""
import re
nkw = _count_not_none(items, like, regex)
if nkw > 1:
raise TypeError('Keyword arguments `items`, `like`, or `regex` '
'are mutually exclusive')
if axis is None:
axis = self._info_axis_name
labels = self._get_axis(axis)
if items is not None:
name = self._get_axis_name(axis)
return self.reindex(
**{name: [r for r in items if r in labels]})
elif like:
def f(x):
if not isinstance(x, string_types):
x = str(x)
return like in x
values = labels.map(f)
return self.loc(axis=axis)[values]
elif regex:
matcher = re.compile(regex)
values = labels.map(lambda x: matcher.search(str(x)) is not None)
return self.loc(axis=axis)[values]
else:
raise TypeError('Must pass either `items`, `like`, or `regex`')
def head(self, n=5):
"""
Return the first n rows.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : type of caller
The first n rows of the caller object.
"""
return self.iloc[:n]
def tail(self, n=5):
"""
Return the last n rows.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_tail : type of caller
The last n rows of the caller object.
"""
if n == 0:
return self.iloc[0:0]
return self.iloc[-n:]
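    # Illustrative sketch (added, not in the original source): ``head`` and
    # ``tail`` are thin wrappers over positional slicing with ``iloc``.
    #
    #   >>> df = pd.DataFrame({'x': range(5)})
    #   >>> df.head(2)['x'].tolist()
    #   [0, 1]
    #   >>> df.tail(2)['x'].tolist()
    #   [3, 4]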
def sample(self, n=None, frac=None, replace=False, weights=None,
random_state=None, axis=None):
"""
Returns a random sample of items from an axis of object.
Parameters
----------
n : int, optional
Number of items from axis to return. Cannot be used with `frac`.
Default = 1 if `frac` = None.
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
replace : boolean, optional
Sample with or without replacement. Default = False.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
values in weights not found in sampled object will be ignored and
index values in sampled object not in weights will be assigned
weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
Unless weights are a Series, weights must be same length as axis
being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
inf and -inf values not allowed.
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int), or numpy RandomState
object.
axis : int or string, optional
Axis to sample. Accepts axis number or name. Default is stat axis
for given data type (0 for Series and DataFrames, 1 for Panels).
Returns
-------
A new object of same type as caller.
Examples
--------
Generate an example ``Series`` and ``DataFrame``:
>>> s = pd.Series(np.random.randn(50))
>>> s.head()
0 -0.038497
1 1.820773
2 -0.972766
3 -1.598270
4 -1.095526
dtype: float64
>>> df = pd.DataFrame(np.random.randn(50, 4), columns=list('ABCD'))
>>> df.head()
A B C D
0 0.016443 -2.318952 -0.566372 -1.028078
1 -1.051921 0.438836 0.658280 -0.175797
2 -1.243569 -0.364626 -0.215065 0.057736
3 1.768216 0.404512 -0.385604 -1.457834
4 1.072446 -1.137172 0.314194 -0.046661
Next extract a random sample from both of these objects...
3 random elements from the ``Series``:
>>> s.sample(n=3)
27 -0.994689
55 -1.049016
67 -0.224565
dtype: float64
And a random 10% of the ``DataFrame`` with replacement:
>>> df.sample(frac=0.1, replace=True)
A B C D
35 1.981780 0.142106 1.817165 -0.290805
49 -1.336199 -0.448634 -0.789640 0.217116
40 0.823173 -0.078816 1.009536 1.015108
15 1.421154 -0.055301 -1.922594 -0.019696
6 -0.148339 0.832938 1.787600 -1.383767
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
axis_length = self.shape[axis]
# Process random_state argument
rs = com._random_state(random_state)
# Check weights for compliance
if weights is not None:
# If a series, align with frame
if isinstance(weights, pd.Series):
weights = weights.reindex(self.axes[axis])
# Strings acceptable if a dataframe and axis = 0
if isinstance(weights, string_types):
if isinstance(self, pd.DataFrame):
if axis == 0:
try:
weights = self[weights]
except KeyError:
raise KeyError("String passed to weights not a "
"valid column")
else:
raise ValueError("Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame")
else:
raise ValueError("Strings cannot be passed as weights "
"when sampling from a Series or Panel.")
weights = pd.Series(weights, dtype='float64')
if len(weights) != axis_length:
raise ValueError("Weights and axis to be sampled must be of "
"same length")
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative "
"values")
# If has nan, set to zero.
weights = weights.fillna(0)
# Renormalize if don't sum to 1
if weights.sum() != 1:
if weights.sum() != 0:
weights = weights / weights.sum()
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
# If no frac or n, default to n=1.
if n is None and frac is None:
n = 1
elif n is not None and frac is None and n % 1 != 0:
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
raise ValueError('Please enter a value for `frac` OR `n`, not '
'both')
# Check for negative sizes
if n < 0:
raise ValueError("A negative number of rows requested. Please "
"provide positive value.")
locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
return self.take(locs, axis=axis, is_copy=False)
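    # Illustrative sketch (added, not in the original source): weights that do
    # not sum to 1 are renormalized, so the two draws below are identical.
    #
    #   >>> df = pd.DataFrame({'x': [1, 2, 3, 4]})
    #   >>> a = df.sample(n=2, weights=[1, 1, 2, 4], random_state=0)
    #   >>> b = df.sample(n=2, weights=[.125, .125, .25, .5], random_state=0)
    #   >>> a.equals(b)
    #   True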
_shared_docs['pipe'] = (r"""
Apply func(self, \*args, \*\*kwargs)
Parameters
----------
func : function
function to apply to the %(klass)s.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the %(klass)s.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. Instead of writing
>>> f(g(h(df), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe(f, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe((f, 'arg2'), arg1=a, arg3=c)
... )
See Also
--------
pandas.DataFrame.apply
pandas.DataFrame.applymap
pandas.Series.map
""")
@Appender(_shared_docs['pipe'] % _shared_doc_kwargs)
def pipe(self, func, *args, **kwargs):
return com._pipe(self, func, *args, **kwargs)
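    # Illustrative sketch (added, not in the original source): ``pipe`` keeps
    # a chain readable when mixing methods with plain functions.
    #
    #   >>> def add_total(df):
    #   ...     return df.assign(total=df.sum(axis=1))
    #   >>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    #   >>> df.pipe(add_total)['total'].tolist()
    #   [4, 6]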
_shared_docs['aggregate'] = ("""
Aggregate using callable, string, dict, or list of string/callables
%(versionadded)s
Parameters
----------
func : callable, string, dictionary, or list of string/callables
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply. For
a DataFrame, can pass a dict, if the keys are DataFrame column names.
Accepted Combinations are:
- string function name
- function
- list of functions
- dict of column names -> functions (or list of functions)
Notes
-----
Numpy functions mean/median/prod/sum/std/var are special cased so the
default behavior is applying the function along axis=0
(e.g., np.mean(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.mean(arr_2d)).
`agg` is an alias for `aggregate`. Use the alias.
Returns
-------
aggregated : %(klass)s
""")
_shared_docs['transform'] = ("""
Call function producing a like-indexed %(klass)s
and return a %(klass)s with the transformed values
.. versionadded:: 0.20.0
Parameters
----------
func : callable, string, dictionary, or list of string/callables
To apply to column
Accepted Combinations are:
- string function name
- function
- list of functions
- dict of column names -> functions (or list of functions)
Returns
-------
transformed : %(klass)s
Examples
--------
>>> df = pd.DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'],
... index=pd.date_range('1/1/2000', periods=10))
    >>> df.iloc[3:7] = np.nan
>>> df.transform(lambda x: (x - x.mean()) / x.std())
A B C
2000-01-01 0.579457 1.236184 0.123424
2000-01-02 0.370357 -0.605875 -1.231325
2000-01-03 1.455756 -0.277446 0.288967
2000-01-04 NaN NaN NaN
2000-01-05 NaN NaN NaN
2000-01-06 NaN NaN NaN
2000-01-07 NaN NaN NaN
2000-01-08 -0.498658 1.274522 1.642524
2000-01-09 -0.540524 -1.012676 -0.828968
2000-01-10 -1.366388 -0.614710 0.005378
See also
--------
pandas.%(klass)s.aggregate
pandas.%(klass)s.apply
""")
# ----------------------------------------------------------------------
# Attribute access
def __finalize__(self, other, method=None, **kwargs):
"""
Propagate metadata from other to self.
Parameters
----------
other : the object from which to get the attributes that we are going
to propagate
method : optional, a passed method name ; possibly to take different
types of propagation actions based on this
"""
if isinstance(other, NDFrame):
for name in self._metadata:
object.__setattr__(self, name, getattr(other, name, None))
return self
def __getattr__(self, name):
"""After regular attribute access, try looking up the name
This allows simpler access to columns for interactive use.
"""
# Note: obj.x will always call obj.__getattribute__('x') prior to
# calling obj.__getattr__('x').
if (name in self._internal_names_set or name in self._metadata or
name in self._accessors):
return object.__getattribute__(self, name)
else:
if name in self._info_axis:
return self[name]
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
"""After regular attribute access, try setting the name
This allows simpler access to columns for interactive use.
"""
# first try regular attribute access via __getattribute__, so that
# e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
# the same attribute.
try:
object.__getattribute__(self, name)
return object.__setattr__(self, name, value)
except AttributeError:
pass
# if this fails, go on to more involved attribute setting
# (note that this matches __getattr__, above).
if name in self._internal_names_set:
object.__setattr__(self, name, value)
elif name in self._metadata:
object.__setattr__(self, name, value)
else:
try:
existing = getattr(self, name)
if isinstance(existing, Index):
object.__setattr__(self, name, value)
elif name in self._info_axis:
self[name] = value
else:
object.__setattr__(self, name, value)
except (AttributeError, TypeError):
if isinstance(self, ABCDataFrame) and (is_list_like(value)):
warnings.warn("Pandas doesn't allow columns to be "
"created via a new attribute name - see "
"https://pandas.pydata.org/pandas-docs/"
"stable/indexing.html#attribute-access",
stacklevel=2)
object.__setattr__(self, name, value)
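    # Illustrative sketch (added, not in the original source): columns are
    # readable as attributes via ``__getattr__``, but assigning to a *new*
    # attribute name does not create a column -- hence the warning above.
    #
    #   >>> df = pd.DataFrame({'price': [1.0, 2.0]})
    #   >>> df.price.tolist()        # attribute read resolves to the column
    #   [1.0, 2.0]
    #   >>> df['qty'] = [5, 6]       # new columns must go through __setitem__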
# ----------------------------------------------------------------------
# Getting and setting elements
# ----------------------------------------------------------------------
# Consolidation of internals
def _protect_consolidate(self, f):
"""Consolidate _data -- if the blocks have changed, then clear the
cache
"""
blocks_before = len(self._data.blocks)
result = f()
if len(self._data.blocks) != blocks_before:
self._clear_item_cache()
return result
def _consolidate_inplace(self):
"""Consolidate data in place and return None"""
def f():
self._data = self._data.consolidate()
self._protect_consolidate(f)
def _consolidate(self, inplace=False):
"""
Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray).
Parameters
----------
inplace : boolean, default False
If False return new object, otherwise modify existing object
Returns
-------
consolidated : type of caller
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
self._consolidate_inplace()
else:
f = lambda: self._data.consolidate()
cons_data = self._protect_consolidate(f)
return self._constructor(cons_data).__finalize__(self)
def consolidate(self, inplace=False):
"""
DEPRECATED: consolidate will be an internal implementation only.
"""
# 15483
warnings.warn("consolidate is deprecated and will be removed in a "
"future release.", FutureWarning, stacklevel=2)
return self._consolidate(inplace)
@property
def _is_mixed_type(self):
f = lambda: self._data.is_mixed_type
return self._protect_consolidate(f)
@property
def _is_numeric_mixed_type(self):
f = lambda: self._data.is_numeric_mixed_type
return self._protect_consolidate(f)
@property
def _is_datelike_mixed_type(self):
f = lambda: self._data.is_datelike_mixed_type
return self._protect_consolidate(f)
def _check_inplace_setting(self, value):
""" check whether we allow in-place setting with this type of value """
if self._is_mixed_type:
if not self._is_numeric_mixed_type:
# allow an actual np.nan thru
try:
if np.isnan(value):
return True
except:
pass
raise TypeError('Cannot do inplace boolean setting on '
'mixed-types with a non np.nan value')
return True
def _get_numeric_data(self):
return self._constructor(
self._data.get_numeric_data()).__finalize__(self)
def _get_bool_data(self):
return self._constructor(self._data.get_bool_data()).__finalize__(self)
# ----------------------------------------------------------------------
# Internal Interface Methods
def as_matrix(self, columns=None):
"""
Convert the frame to its Numpy-array representation.
Parameters
----------
columns: list, optional, default:None
If None, return all columns, otherwise, returns specified columns.
Returns
-------
values : ndarray
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
Notes
-----
Return is NOT a Numpy-matrix, rather, a Numpy-array.
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
        float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By numpy.find_common_type convention, mixing int64 and uint64
        will result in a float64 dtype.
This method is provided for backwards compatibility. Generally,
it is recommended to use '.values'.
See Also
--------
pandas.DataFrame.values
"""
self._consolidate_inplace()
if self._AXIS_REVERSED:
return self._data.as_matrix(columns).T
return self._data.as_matrix(columns)
@property
def values(self):
"""Numpy representation of NDFrame
Notes
-----
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By numpy.find_common_type convention, mixing int64 and uint64
        will result in a float64 dtype.
"""
return self.as_matrix()
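    # Illustrative sketch (added, not in the original source): with mixed
    # numeric dtypes, ``.values`` upcasts to the lowest common dtype.
    #
    #   >>> df = pd.DataFrame({'i': [1, 2], 'f': [0.5, 1.5]})
    #   >>> df.values.dtype
    #   dtype('float64')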
@property
def _values(self):
"""internal implementation"""
return self.values
@property
def _get_values(self):
# compat
return self.as_matrix()
def get_values(self):
"""same as values (but handles sparseness conversions)"""
return self.as_matrix()
def get_dtype_counts(self):
"""Return the counts of dtypes in this object."""
from pandas import Series
return Series(self._data.get_dtype_counts())
def get_ftype_counts(self):
"""Return the counts of ftypes in this object."""
from pandas import Series
return Series(self._data.get_ftype_counts())
@property
def dtypes(self):
"""Return the dtypes in this object."""
from pandas import Series
return Series(self._data.get_dtypes(), index=self._info_axis,
dtype=np.object_)
@property
def ftypes(self):
"""
Return the ftypes (indication of sparse/dense and dtype)
in this object.
"""
from pandas import Series
return Series(self._data.get_ftypes(), index=self._info_axis,
dtype=np.object_)
def as_blocks(self, copy=True):
"""
Convert the frame to a dict of dtype -> Constructor Types that each has
a homogeneous dtype.
.. deprecated:: 0.21.0
NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in
as_matrix)
Parameters
----------
copy : boolean, default True
Returns
-------
values : a dict of dtype -> Constructor Types
"""
warnings.warn("as_blocks is deprecated and will "
"be removed in a future version",
FutureWarning, stacklevel=2)
return self._to_dict_of_blocks(copy=copy)
@property
def blocks(self):
"""
Internal property, property synonym for as_blocks()
.. deprecated:: 0.21.0
"""
return self.as_blocks()
def _to_dict_of_blocks(self, copy=True):
"""
Return a dict of dtype -> Constructor Types that
each is a homogeneous dtype.
Internal ONLY
"""
return {k: self._constructor(v).__finalize__(self)
for k, v, in self._data.to_dict(copy=copy).items()}
@deprecate_kwarg(old_arg_name='raise_on_error', new_arg_name='errors',
mapping={True: 'raise', False: 'ignore'})
def astype(self, dtype, copy=True, errors='raise', **kwargs):
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
copy : bool, default True.
Return a copy when ``copy=True`` (be very careful setting
``copy=False`` as changes to values then may propagate to other
pandas objects).
errors : {'raise', 'ignore'}, default 'raise'.
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
.. versionadded:: 0.20.0
raise_on_error : raise on invalid input
.. deprecated:: 0.20.0
Use ``errors`` instead
kwargs : keyword arguments to pass on to the constructor
Returns
-------
casted : type of caller
Examples
--------
>>> ser = pd.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
Convert to categorical type:
>>> ser.astype('category')
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
Convert to ordered categorical type with custom ordering:
>>> ser.astype('category', ordered=True, categories=[2, 1])
0 1
1 2
dtype: category
Categories (2, int64): [2 < 1]
Note that using ``copy=False`` and changing data on a new
pandas object may propagate changes:
>>> s1 = pd.Series([1,2])
>>> s2 = s1.astype('int', copy=False)
>>> s2[0] = 10
>>> s1 # note that s1[0] has changed too
0 10
1 2
dtype: int64
See also
--------
pandas.to_datetime : Convert argument to datetime.
pandas.to_timedelta : Convert argument to timedelta.
pandas.to_numeric : Convert argument to a numeric type.
numpy.ndarray.astype : Cast a numpy array to a specified type.
"""
if is_dict_like(dtype):
if self.ndim == 1: # i.e. Series
if len(dtype) > 1 or self.name not in dtype:
raise KeyError('Only the Series name can be used for '
'the key in Series dtype mappings.')
new_type = dtype[self.name]
return self.astype(new_type, copy, errors, **kwargs)
elif self.ndim > 2:
raise NotImplementedError(
'astype() only accepts a dtype arg of type dict when '
'invoked on Series and DataFrames. A single dtype must be '
'specified when invoked on a Panel.'
)
for col_name in dtype.keys():
if col_name not in self:
raise KeyError('Only a column name can be used for the '
'key in a dtype mappings argument.')
from pandas import concat
results = []
for col_name, col in self.iteritems():
if col_name in dtype:
results.append(col.astype(dtype[col_name], copy=copy))
else:
                    results.append(col.copy() if copy else col)
return concat(results, axis=1, copy=False)
# else, only a single dtype is given
new_data = self._data.astype(dtype=dtype, copy=copy, errors=errors,
**kwargs)
return self._constructor(new_data).__finalize__(self)
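    # Illustrative sketch (added, not in the original source): a dict maps
    # column names to dtypes and leaves unlisted columns unchanged.
    #
    #   >>> df = pd.DataFrame({'a': [1, 2], 'b': [1, 2]})
    #   >>> df.astype({'a': 'float64'}).dtypes.tolist()
    #   [dtype('float64'), dtype('int64')]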
def copy(self, deep=True):
"""
Make a copy of this objects data.
Parameters
----------
deep : boolean or string, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices or the data are copied.
Note that when ``deep=True`` data is copied, actual python objects
will not be copied recursively, only the reference to the object.
This is in contrast to ``copy.deepcopy`` in the Standard Library,
which recursively copies object data.
Returns
-------
copy : type of caller
"""
data = self._data.copy(deep=deep)
return self._constructor(data).__finalize__(self)
def __copy__(self, deep=True):
return self.copy(deep=deep)
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
return self.copy(deep=True)
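    # Illustrative sketch (added, not in the original source): a shallow copy
    # shares data with the original, so in-place edits are visible in both.
    #
    #   >>> s = pd.Series([1, 2, 3])
    #   >>> shallow = s.copy(deep=False)
    #   >>> shallow.iloc[0] = 99
    #   >>> s.iloc[0]
    #   99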
def _convert(self, datetime=False, numeric=False, timedelta=False,
coerce=False, copy=True):
"""
Attempt to infer better dtype for object columns
Parameters
----------
datetime : boolean, default False
If True, convert to date where possible.
numeric : boolean, default False
If True, attempt to convert to numbers (including strings), with
unconvertible values becoming NaN.
timedelta : boolean, default False
If True, convert to timedelta where possible.
coerce : boolean, default False
If True, force conversion with unconvertible values converted to
nulls (NaN or NaT)
copy : boolean, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
"""
return self._constructor(
self._data.convert(datetime=datetime, numeric=numeric,
timedelta=timedelta, coerce=coerce,
copy=copy)).__finalize__(self)
    # TODO: Remove in 0.18 or 2017, whichever is sooner
def convert_objects(self, convert_dates=True, convert_numeric=False,
convert_timedeltas=True, copy=True):
"""
Deprecated.
Attempt to infer better dtype for object columns
Parameters
----------
convert_dates : boolean, default True
If True, convert to date where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
convert_numeric : boolean, default False
If True, attempt to coerce to numbers (including strings), with
unconvertible values becoming NaN.
convert_timedeltas : boolean, default True
If True, convert to timedelta where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
copy : boolean, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
See Also
--------
pandas.to_datetime : Convert argument to datetime.
pandas.to_timedelta : Convert argument to timedelta.
        pandas.to_numeric : Convert argument to a numeric type.
Returns
-------
converted : same as input object
"""
msg = ("convert_objects is deprecated. To re-infer data dtypes for "
"object columns, use {klass}.infer_objects()\nFor all "
"other conversions use the data-type specific converters "
"pd.to_datetime, pd.to_timedelta and pd.to_numeric."
).format(klass=self.__class__.__name__)
warnings.warn(msg, FutureWarning, stacklevel=2)
return self._constructor(
self._data.convert(convert_dates=convert_dates,
convert_numeric=convert_numeric,
convert_timedeltas=convert_timedeltas,
copy=copy)).__finalize__(self)
def infer_objects(self):
"""
Attempt to infer better dtypes for object columns.
Attempts soft conversion of object-dtyped
columns, leaving non-object and unconvertible
columns unchanged. The inference rules are the
same as during normal Series/DataFrame construction.
.. versionadded:: 0.21.0
See Also
--------
pandas.to_datetime : Convert argument to datetime.
pandas.to_timedelta : Convert argument to timedelta.
        pandas.to_numeric : Convert argument to a numeric type.
Returns
-------
converted : same type as input object
Examples
--------
>>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
>>> df = df.iloc[1:]
>>> df
A
1 1
2 2
3 3
>>> df.dtypes
A object
dtype: object
>>> df.infer_objects().dtypes
A int64
dtype: object
"""
# numeric=False necessary to only soft convert;
# python objects will still be converted to
# native numpy numeric types
return self._constructor(
self._data.convert(datetime=True, numeric=False,
timedelta=True, coerce=False,
copy=True)).__finalize__(self)
# ----------------------------------------------------------------------
# Filling NA's
_shared_docs['fillna'] = ("""
Fill NA/NaN values using the specified method
Parameters
----------
value : scalar, dict, Series, or DataFrame
Value to use to fill holes (e.g. 0), alternately a
dict/Series/DataFrame of values specifying which value to use for
each index (for a Series) or column (for a DataFrame). (values not
in the dict/Series/DataFrame will not be filled). This value cannot
be a list.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
axis : %(axes_single_arg)s
inplace : boolean, default False
If True, fill in place. Note: this will modify any
other views on this object, (e.g. a no-copy slice for a column in a
DataFrame).
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
downcast : dict, default is None
a dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible)
See Also
--------
reindex, asfreq
Returns
-------
filled : %(klass)s
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
... [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5],
... [np.nan, 3, np.nan, 4]],
... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 NaN 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 0.0 4
We can also propagate non-null values forward or backward.
>>> df.fillna(method='ffill')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 NaN 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 2.0 4
Only replace the first NaN element.
>>> df.fillna(value=values, limit=1)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 NaN 1
2 NaN 1.0 NaN 5
3 NaN 3.0 NaN 4
""")
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None):
inplace = validate_bool_kwarg(inplace, 'inplace')
if isinstance(value, (list, tuple)):
raise TypeError('"value" parameter must be a scalar or dict, but '
'you passed a "{0}"'.format(type(value).__name__))
self._consolidate_inplace()
        # set the default here, so functions examining the signature
# can detect if something was set (e.g. in groupby) (GH9221)
if axis is None:
axis = 0
axis = self._get_axis_number(axis)
method = missing.clean_fill_method(method)
from pandas import DataFrame
if value is None:
if method is None:
raise ValueError('must specify a fill method or value')
if self._is_mixed_type and axis == 1:
if inplace:
raise NotImplementedError()
result = self.T.fillna(method=method, limit=limit).T
# need to downcast here because of all of the transposes
result._data = result._data.downcast()
return result
# > 3d
if self.ndim > 3:
raise NotImplementedError('Cannot fillna with a method for > '
'3dims')
# 3d
elif self.ndim == 3:
# fill in 2d chunks
result = dict([(col, s.fillna(method=method, value=value))
for col, s in self.iteritems()])
new_obj = self._constructor.\
from_dict(result).__finalize__(self)
new_data = new_obj._data
else:
# 2d or less
method = missing.clean_fill_method(method)
new_data = self._data.interpolate(method=method, axis=axis,
limit=limit, inplace=inplace,
coerce=True,
downcast=downcast)
else:
if method is not None:
raise ValueError('cannot specify both a fill method and value')
if len(self._get_axis(axis)) == 0:
return self
if self.ndim == 1:
if isinstance(value, (dict, ABCSeries)):
from pandas import Series
value = Series(value)
elif not is_list_like(value):
pass
else:
raise ValueError("invalid fill value with a %s" %
type(value))
new_data = self._data.fillna(value=value, limit=limit,
inplace=inplace,
downcast=downcast)
elif isinstance(value, (dict, ABCSeries)):
if axis == 1:
raise NotImplementedError('Currently only can fill '
'with dict/Series column '
'by column')
result = self if inplace else self.copy()
for k, v in compat.iteritems(value):
if k not in result:
continue
obj = result[k]
obj.fillna(v, limit=limit, inplace=True, downcast=downcast)
return result if not inplace else None
elif not is_list_like(value):
new_data = self._data.fillna(value=value, limit=limit,
inplace=inplace,
downcast=downcast)
elif isinstance(value, DataFrame) and self.ndim == 2:
new_data = self.where(self.notna(), value)
else:
raise ValueError("invalid fill value with a %s" % type(value))
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
"""
Synonym for :meth:`DataFrame.fillna(method='ffill') <DataFrame.fillna>`
"""
return self.fillna(method='ffill', axis=axis, inplace=inplace,
limit=limit, downcast=downcast)
def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
"""
Synonym for :meth:`DataFrame.fillna(method='bfill') <DataFrame.fillna>`
"""
return self.fillna(method='bfill', axis=axis, inplace=inplace,
limit=limit, downcast=downcast)
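    # Illustrative sketch (added, not in the original source; ``np`` is the
    # module-level numpy alias):
    #
    #   >>> s = pd.Series([1.0, np.nan, 3.0])
    #   >>> s.ffill().tolist()
    #   [1.0, 1.0, 3.0]
    #   >>> s.bfill().tolist()
    #   [1.0, 3.0, 3.0]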
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
regex=False, method='pad', axis=None):
"""
Replace values given in 'to_replace' with 'value'.
Parameters
----------
to_replace : str, regex, list, dict, Series, numeric, or None
* str or regex:
- str: string exactly matching `to_replace` will be replaced
with `value`
- regex: regexs matching `to_replace` will be replaced with
`value`
* list of str, regex, or numeric:
- First, if `to_replace` and `value` are both lists, they
**must** be the same length.
- Second, if ``regex=True`` then all of the strings in **both**
lists will be interpreted as regexs otherwise they will match
directly. This doesn't matter much for `value` since there
are only a few possible substitution regexes you can use.
- str and regex rules apply as above.
* dict:
- Nested dictionaries, e.g., {'a': {'b': nan}}, are read as
follows: look in column 'a' for the value 'b' and replace it
with nan. You can nest regular expressions as well. Note that
column names (the top-level dictionary keys in a nested
dictionary) **cannot** be regular expressions.
- Keys map to column names and values map to substitution
values. You can treat this as a special case of passing two
lists except that you are specifying the column to search in.
* None:
- This means that the ``regex`` argument must be a string,
compiled regular expression, or list, dict, ndarray or Series
of such elements. If `value` is also ``None`` then this
**must** be a nested dictionary or ``Series``.
See the examples section for examples of each of these.
value : scalar, dict, list, str, regex, default None
Value to use to fill holes (e.g. 0), alternately a dict of values
specifying which value to use for each column (columns not in the
dict will not be filled). Regular expressions, strings and lists or
dicts of such objects are also allowed.
inplace : boolean, default False
If True, in place. Note: this will modify any
other views on this object (e.g. a column from a DataFrame).
Returns the caller if this is True.
limit : int, default None
Maximum size gap to forward or backward fill
regex : bool or same types as `to_replace`, default False
Whether to interpret `to_replace` and/or `value` as regular
expressions. If this is ``True`` then `to_replace` *must* be a
string. Otherwise, `to_replace` must be ``None`` because this
parameter will be interpreted as a regular expression or a list,
dict, or array of regular expressions.
method : string, optional, {'pad', 'ffill', 'bfill'}
            The method to use for replacement, when ``to_replace`` is a
``list``.
See Also
--------
NDFrame.reindex
NDFrame.asfreq
NDFrame.fillna
Returns
-------
filled : NDFrame
Raises
------
AssertionError
* If `regex` is not a ``bool`` and `to_replace` is not ``None``.
TypeError
* If `to_replace` is a ``dict`` and `value` is not a ``list``,
``dict``, ``ndarray``, or ``Series``
* If `to_replace` is ``None`` and `regex` is not compilable into a
regular expression or is a list, dict, ndarray, or Series.
ValueError
* If `to_replace` and `value` are ``list`` s or ``ndarray`` s, but
they are not the same length.
Notes
-----
* Regex substitution is performed under the hood with ``re.sub``. The
rules for substitution for ``re.sub`` are the same.
* Regular expressions will only substitute on strings, meaning you
cannot provide, for example, a regular expression matching floating
point numbers and expect the columns in your frame that have a
numeric dtype to be matched. However, if those floating point numbers
*are* strings, then you can do this.
* This method has *a lot* of options. You are encouraged to experiment
and play with this method to gain intuition about how it works.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not is_bool(regex) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is "
"not a bool")
if axis is not None:
warnings.warn('the "axis" argument is deprecated '
                          'and will be removed in '
'v0.13; this argument has no effect')
self._consolidate_inplace()
if value is None:
# passing a single value that is scalar like
# when value is None (GH5319), for compat
if not is_dict_like(to_replace) and not is_dict_like(regex):
to_replace = [to_replace]
if isinstance(to_replace, (tuple, list)):
return _single_replace(self, to_replace, method, inplace,
limit)
if not is_dict_like(to_replace):
if not is_dict_like(regex):
raise TypeError('If "to_replace" and "value" are both None'
' and "to_replace" is not a list, then '
'regex must be a mapping')
to_replace = regex
regex = True
items = list(compat.iteritems(to_replace))
keys, values = lzip(*items) or ([], [])
are_mappings = [is_dict_like(v) for v in values]
if any(are_mappings):
if not all(are_mappings):
raise TypeError("If a nested mapping is passed, all values"
" of the top level mapping must be "
"mappings")
# passed a nested dict/Series
to_rep_dict = {}
value_dict = {}
for k, v in items:
keys, values = lzip(*v.items()) or ([], [])
if set(keys) & set(values):
raise ValueError("Replacement not allowed with "
"overlapping keys and values")
to_rep_dict[k] = list(keys)
value_dict[k] = list(values)
to_replace, value = to_rep_dict, value_dict
else:
to_replace, value = keys, values
return self.replace(to_replace, value, inplace=inplace,
limit=limit, regex=regex)
else:
# need a non-zero len on all axes
for a in self._AXIS_ORDERS:
if not len(self._get_axis(a)):
return self
new_data = self._data
if is_dict_like(to_replace):
if is_dict_like(value): # {'A' : NA} -> {'A' : 0}
res = self if inplace else self.copy()
for c, src in compat.iteritems(to_replace):
if c in value and c in self:
# object conversion is handled in
                        # series.replace which is called recursively
res[c] = res[c].replace(to_replace=src,
value=value[c],
inplace=False,
regex=regex)
return None if inplace else res
# {'A': NA} -> 0
elif not is_list_like(value):
keys = [(k, src) for k, src in compat.iteritems(to_replace)
if k in self]
keys_len = len(keys) - 1
for i, (k, src) in enumerate(keys):
convert = i == keys_len
new_data = new_data.replace(to_replace=src,
value=value,
filter=[k],
inplace=inplace,
regex=regex,
convert=convert)
else:
raise TypeError('value argument must be scalar, dict, or '
'Series')
elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']
if is_list_like(value):
if len(to_replace) != len(value):
raise ValueError('Replacement lists must match '
'in length. Expecting %d got %d ' %
(len(to_replace), len(value)))
new_data = self._data.replace_list(src_list=to_replace,
dest_list=value,
inplace=inplace,
regex=regex)
else: # [NA, ''] -> 0
new_data = self._data.replace(to_replace=to_replace,
value=value, inplace=inplace,
regex=regex)
elif to_replace is None:
if not (is_re_compilable(regex) or
is_list_like(regex) or is_dict_like(regex)):
raise TypeError("'regex' must be a string or a compiled "
"regular expression or a list or dict of "
"strings or regular expressions, you "
"passed a"
" {0!r}".format(type(regex).__name__))
return self.replace(regex, value, inplace=inplace, limit=limit,
regex=True)
else:
# dest iterable dict-like
if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}
new_data = self._data
for k, v in compat.iteritems(value):
if k in self:
new_data = new_data.replace(to_replace=to_replace,
value=v, filter=[k],
inplace=inplace,
regex=regex)
elif not is_list_like(value): # NA -> 0
new_data = self._data.replace(to_replace=to_replace,
value=value, inplace=inplace,
regex=regex)
else:
msg = ('Invalid "to_replace" type: '
'{0!r}').format(type(to_replace).__name__)
raise TypeError(msg) # pragma: no cover
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
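    # Illustrative sketch (added, not in the original source): the nested-dict
    # form restricts each replacement to a single column.
    #
    #   >>> df = pd.DataFrame({'a': [0, 1], 'b': [0, 1]})
    #   >>> df.replace({'a': {0: 10}})['a'].tolist()
    #   [10, 1]
    #   >>> df.replace({'a': {0: 10}})['b'].tolist()   # 'b' is untouched
    #   [0, 1]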
_shared_docs['interpolate'] = """
Please note that only ``method='linear'`` is supported for
DataFrames/Series with a MultiIndex.
Parameters
----------
method : {'linear', 'time', 'index', 'values', 'nearest', 'zero',
'slinear', 'quadratic', 'cubic', 'barycentric', 'krogh',
'polynomial', 'spline', 'piecewise_polynomial',
'from_derivatives', 'pchip', 'akima'}
* 'linear': ignore the index and treat the values as equally
spaced. This is the only method supported on MultiIndexes.
default
* 'time': interpolation works on daily and higher resolution
data to interpolate given length of interval
* 'index', 'values': use the actual numerical values of the index
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'barycentric', 'polynomial' is passed to
``scipy.interpolate.interp1d``. Both 'polynomial' and 'spline'
require that you also specify an `order` (int),
e.g. df.interpolate(method='polynomial', order=4).
These use the actual numerical values of the index.
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
are all wrappers around the scipy interpolation methods of
similar names. These use the actual numerical values of the
index. For more information on their behavior, see the
`scipy documentation
<http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
and `tutorial documentation
<http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__
* 'from_derivatives' refers to BPoly.from_derivatives which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18
.. versionadded:: 0.18.1
Added support for the 'akima' method
Added interpolate method 'from_derivatives' which replaces
'piecewise_polynomial' in scipy 0.18; backwards-compatible with
scipy < 0.18
axis : {0, 1}, default 0
* 0: fill column-by-column
* 1: fill row-by-row
limit : int, default None.
Maximum number of consecutive NaNs to fill. Must be greater than 0.
limit_direction : {'forward', 'backward', 'both'}, default 'forward'
If limit is specified, consecutive NaNs will be filled in this
direction.
.. versionadded:: 0.17.0
inplace : bool, default False
Update the NDFrame in place if possible.
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
kwargs : keyword arguments to pass on to the interpolating function.
Returns
-------
Series or DataFrame of same shape interpolated at the NaNs
See Also
--------
reindex, replace, fillna
Examples
--------
Filling in NaNs
>>> s = pd.Series([0, 1, np.nan, 3])
>>> s.interpolate()
0 0
1 1
2 2
3 3
dtype: float64
"""
@Appender(_shared_docs['interpolate'] % _shared_doc_kwargs)
def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
limit_direction='forward', downcast=None, **kwargs):
"""
Interpolate values according to different methods.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if self.ndim > 2:
raise NotImplementedError("Interpolate has not been implemented "
"on Panel and Panel 4D objects.")
if axis == 0:
ax = self._info_axis_name
_maybe_transposed_self = self
elif axis == 1:
_maybe_transposed_self = self.T
ax = 1
else:
_maybe_transposed_self = self
ax = _maybe_transposed_self._get_axis_number(ax)
if _maybe_transposed_self.ndim == 2:
alt_ax = 1 - ax
else:
alt_ax = ax
if (isinstance(_maybe_transposed_self.index, MultiIndex) and
method != 'linear'):
raise ValueError("Only `method=linear` interpolation is supported "
"on MultiIndexes.")
if _maybe_transposed_self._data.get_dtype_counts().get(
'object') == len(_maybe_transposed_self.T):
raise TypeError("Cannot interpolate with all NaNs.")
# create/use the index
if method == 'linear':
# prior default
index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))
else:
index = _maybe_transposed_self._get_axis(alt_ax)
if isna(index).any():
raise NotImplementedError("Interpolation with NaNs in the index "
"has not been implemented. Try filling "
"those NaNs before interpolating.")
data = _maybe_transposed_self._data
new_data = data.interpolate(method=method, axis=ax, index=index,
values=_maybe_transposed_self, limit=limit,
limit_direction=limit_direction,
inplace=inplace, downcast=downcast,
**kwargs)
if inplace:
if axis == 1:
new_data = self._constructor(new_data).T._data
self._update_inplace(new_data)
else:
res = self._constructor(new_data).__finalize__(self)
if axis == 1:
res = res.T
return res
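    # Illustrative sketch (added, not in the original source): linear
    # interpolation fills interior NaNs; ``limit`` caps consecutive fills.
    #
    #   >>> s = pd.Series([0.0, np.nan, np.nan, 3.0])
    #   >>> s.interpolate().tolist()
    #   [0.0, 1.0, 2.0, 3.0]
    #   >>> s.interpolate(limit=1).tolist()
    #   [0.0, 1.0, nan, 3.0]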
# ----------------------------------------------------------------------
    # Timeseries Methods
def asof(self, where, subset=None):
"""
The last row without any NaN is taken (or the last row without
NaN considering only the subset of columns in the case of a DataFrame)
.. versionadded:: 0.19.0 For DataFrame
If there is no good value, NaN is returned for a Series
        or a Series of NaN values for a DataFrame
Parameters
----------
where : date or array of dates
subset : string or list of strings, default None
if not None use these columns for NaN propagation
Notes
-----
Dates are assumed to be sorted
Raises if this is not the case
Returns
-------
where is scalar
- value or NaN if input is Series
- Series if input is DataFrame
where is Index: same shape object as input
See Also
--------
merge_asof
"""
if isinstance(where, compat.string_types):
from pandas import to_datetime
where = to_datetime(where)
if not self.index.is_monotonic:
raise ValueError("asof requires a sorted index")
is_series = isinstance(self, ABCSeries)
if is_series:
if subset is not None:
raise ValueError("subset is not valid for Series")
elif self.ndim > 2:
raise NotImplementedError("asof is not implemented "
"for {type}".format(type=type(self)))
else:
if subset is None:
subset = self.columns
if not is_list_like(subset):
subset = [subset]
is_list = is_list_like(where)
if not is_list:
start = self.index[0]
if isinstance(self.index, PeriodIndex):
where = Period(where, freq=self.index.freq).ordinal
start = start.ordinal
if where < start:
if not is_series:
from pandas import Series
return Series(index=self.columns, name=where)
return np.nan
# It's always much faster to use a *while* loop here for
# Series than pre-computing all the NAs. However a
# *while* loop is extremely expensive for DataFrame
# so we later pre-compute all the NAs and use the same
# code path whether *where* is a scalar or list.
# See PR: https://github.com/pandas-dev/pandas/pull/14476
if is_series:
loc = self.index.searchsorted(where, side='right')
if loc > 0:
loc -= 1
values = self._values
while loc > 0 and isna(values[loc]):
loc -= 1
return values[loc]
if not isinstance(where, Index):
where = Index(where) if is_list else Index([where])
nulls = self.isna() if is_series else self[subset].isna().any(1)
if nulls.all():
if is_series:
return self._constructor(np.nan, index=where, name=self.name)
elif is_list:
from pandas import DataFrame
return DataFrame(np.nan, index=where, columns=self.columns)
else:
from pandas import Series
return Series(np.nan, index=self.columns, name=where[0])
locs = self.index.asof_locs(where, ~(nulls.values))
# mask the missing
missing = locs == -1
data = self.take(locs, is_copy=False)
data.index = where
data.loc[missing] = np.nan
return data if is_list else data.iloc[-1]
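    # Illustrative sketch (added, not in the original source): ``asof`` walks
    # back from the requested label to the last non-NaN observation.
    #
    #   >>> s = pd.Series([1.0, 2.0, np.nan], index=[10, 20, 30])
    #   >>> s.asof(25)
    #   2.0
    #   >>> s.asof(35)    # the value at 30 is NaN, so fall back to 20's
    #   2.0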
# ----------------------------------------------------------------------
# Action Methods
_shared_docs['isna'] = """
Return a boolean same-sized object indicating if the values are NA.
See Also
--------
%(klass)s.notna : boolean inverse of isna
%(klass)s.isnull : alias of isna
isna : top-level isna
"""
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
return isna(self).__finalize__(self)
@Appender(_shared_docs['isna'] % _shared_doc_kwargs)
def isnull(self):
return isna(self).__finalize__(self)
_shared_docs['notna'] = """
Return a boolean same-sized object indicating if the values are
not NA.
See Also
--------
%(klass)s.isna : boolean inverse of notna
%(klass)s.notnull : alias of notna
notna : top-level notna
"""
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
return notna(self).__finalize__(self)
@Appender(_shared_docs['notna'] % _shared_doc_kwargs)
def notnull(self):
return notna(self).__finalize__(self)
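    # Illustrative sketch (added, not in the original source): ``isna`` and
    # ``notna`` (with their ``isnull``/``notnull`` aliases) return boolean
    # masks of the same shape.
    #
    #   >>> s = pd.Series([1.0, np.nan])
    #   >>> s.isna().tolist()
    #   [False, True]
    #   >>> s.notna().tolist()
    #   [True, False]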
def _clip_with_scalar(self, lower, upper, inplace=False):
if ((lower is not None and np.any(isna(lower))) or
(upper is not None and np.any(isna(upper)))):
raise ValueError("Cannot use an NA value as a clip threshold")
result = self.values
mask = isna(result)
with np.errstate(all='ignore'):
if upper is not None:
result = np.where(result >= upper, upper, result)
if lower is not None:
result = np.where(result <= lower, lower, result)
if np.any(mask):
result[mask] = np.nan
axes_dict = self._construct_axes_dict()
result = self._constructor(result, **axes_dict).__finalize__(self)
if inplace:
self._update_inplace(result)
else:
return result
def _clip_with_one_bound(self, threshold, method, axis, inplace):
inplace = validate_bool_kwarg(inplace, 'inplace')
if axis is not None:
axis = self._get_axis_number(axis)
# method is self.le for upper bound and self.ge for lower bound
if is_scalar(threshold) and is_number(threshold):
if method.__name__ == 'le':
return self._clip_with_scalar(None, threshold, inplace=inplace)
return self._clip_with_scalar(threshold, None, inplace=inplace)
subset = method(threshold, axis=axis) | isna(self)
# GH #15390
# In order for where method to work, the threshold must
# be transformed to NDFrame from other array like structure.
if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):
if isinstance(self, ABCSeries):
threshold = pd.Series(threshold, index=self.index)
else:
threshold = _align_method_FRAME(self, np.asarray(threshold),
axis)
return self.where(subset, threshold, axis=axis, inplace=inplace)
def clip(self, lower=None, upper=None, axis=None, inplace=False,
*args, **kwargs):
"""
Trim values at input threshold(s).
Parameters
----------
lower : float or array_like, default None
upper : float or array_like, default None
axis : int or string axis name, optional
Align object with lower and upper along the given axis.
inplace : boolean, default False
Whether to perform the operation in place on the data
.. versionadded:: 0.21.0
Returns
-------
clipped : Series
Examples
--------
>>> df
0 1
0 0.335232 -1.256177
1 -1.367855 0.746646
2 0.027753 -1.176076
3 0.230930 -0.679613
4 1.261967 0.570967
>>> df.clip(-1.0, 0.5)
0 1
0 0.335232 -1.000000
1 -1.000000 0.500000
2 0.027753 -1.000000
3 0.230930 -0.679613
4 0.500000 0.500000
        >>> t = pd.Series([-0.3, -0.2, -0.1, 0.0, 0.1])
        >>> t
0 -0.3
1 -0.2
2 -0.1
3 0.0
4 0.1
dtype: float64
>>> df.clip(t, t + 1, axis=0)
0 1
0 0.335232 -0.300000
1 -0.200000 0.746646
2 0.027753 -0.100000
3 0.230930 0.000000
4 1.100000 0.570967
"""
if isinstance(self, ABCPanel):
raise NotImplementedError("clip is not supported yet for panels")
inplace = validate_bool_kwarg(inplace, 'inplace')
axis = nv.validate_clip_with_axis(axis, args, kwargs)
# GH 17276
# numpy doesn't like NaN as a clip value
# so ignore
if np.any(pd.isnull(lower)):
lower = None
if np.any(pd.isnull(upper)):
upper = None
# GH 2747 (arguments were reversed)
if lower is not None and upper is not None:
if is_scalar(lower) and is_scalar(upper):
lower, upper = min(lower, upper), max(lower, upper)
# fast-path for scalars
if ((lower is None or (is_scalar(lower) and is_number(lower))) and
(upper is None or (is_scalar(upper) and is_number(upper)))):
return self._clip_with_scalar(lower, upper, inplace=inplace)
result = self
if lower is not None:
result = result.clip_lower(lower, axis, inplace=inplace)
if upper is not None:
if inplace:
result = self
result = result.clip_upper(upper, axis, inplace=inplace)
return result
def clip_upper(self, threshold, axis=None, inplace=False):
"""
Return copy of input with values above given value(s) truncated.
Parameters
----------
threshold : float or array_like
axis : int or string axis name, optional
Align object with threshold along the given axis.
inplace : boolean, default False
Whether to perform the operation in place on the data
.. versionadded:: 0.21.0
See Also
--------
clip
Returns
-------
clipped : same type as input
"""
return self._clip_with_one_bound(threshold, method=self.le,
axis=axis, inplace=inplace)
def clip_lower(self, threshold, axis=None, inplace=False):
"""
Return copy of the input with values below given value(s) truncated.
Parameters
----------
threshold : float or array_like
axis : int or string axis name, optional
Align object with threshold along the given axis.
inplace : boolean, default False
Whether to perform the operation in place on the data
.. versionadded:: 0.21.0
See Also
--------
clip
Returns
-------
clipped : same type as input
"""
return self._clip_with_one_bound(threshold, method=self.ge,
axis=axis, inplace=inplace)
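    # Illustrative sketch (added, not in the original source): the one-sided
    # clips truncate in a single direction only.
    #
    #   >>> s = pd.Series([-2, 0, 2])
    #   >>> s.clip_upper(1).tolist()
    #   [-2, 0, 1]
    #   >>> s.clip_lower(-1).tolist()
    #   [-1, 0, 2]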
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
group_keys=True, squeeze=False, **kwargs):
"""
Group series using mapper (dict or key function, apply given function
to group, return result as series) or by a series of columns.
Parameters
----------
by : mapping, function, str, or iterable
Used to determine the groups for the groupby.
If ``by`` is a function, it's called on each value of the object's
index. If a dict or Series is passed, the Series or dict VALUES
will be used to determine the groups (the Series' values are first
aligned; see ``.align()`` method). If an ndarray is passed, the
            values are used as-is to determine the groups. A str or list of strs
may be passed to group by the columns in ``self``
axis : int, default 0
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels
as_index : boolean, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output
sort : boolean, default True
Sort group keys. Get better performance by turning this off.
Note this does not influence the order of observations within each
group. groupby preserves the order of rows within each group.
group_keys : boolean, default True
When calling apply, add group keys to index to identify pieces
squeeze : boolean, default False
reduce the dimensionality of the return type if possible,
otherwise return a consistent type
Examples
--------
DataFrame results
>>> data.groupby(func, axis=0).mean()
>>> data.groupby(['col1', 'col2'])['col3'].mean()
DataFrame with hierarchical index
>>> data.groupby(['col1', 'col2']).mean()
Returns
-------
GroupBy object
"""
from pandas.core.groupby import groupby
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
return groupby(self, by=by, axis=axis, level=level, as_index=as_index,
sort=sort, group_keys=group_keys, squeeze=squeeze,
**kwargs)
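    # Illustrative sketch (added, not in the original source): group by a
    # column and aggregate, as described in the docstring above.
    #
    #   >>> df = pd.DataFrame({'key': ['a', 'a', 'b'], 'val': [1, 2, 3]})
    #   >>> df.groupby('key')['val'].sum().tolist()
    #   [3, 3]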
def asfreq(self, freq, method=None, how=None, normalize=False,
fill_value=None):
"""
Convert TimeSeries to specified frequency.
Optionally provide filling method to pad/backfill missing values.
Returns the original data conformed to a new index with the specified
frequency. ``resample`` is more appropriate if an operation, such as
summarization, is necessary to represent the data at the new frequency.
Parameters
----------
freq : DateOffset object, or string
method : {'backfill'/'bfill', 'pad'/'ffill'}, default None
Method to use for filling holes in reindexed Series (note this
does not fill NaNs that already were present):
* 'pad' / 'ffill': propagate last valid observation forward to next
valid
* 'backfill' / 'bfill': use NEXT valid observation to fill
how : {'start', 'end'}, default end
For PeriodIndex only, see PeriodIndex.asfreq
normalize : bool, default False
Whether to reset output index to midnight
fill_value: scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
.. versionadded:: 0.20.0
Returns
-------
converted : type of caller
Examples
--------
Start by creating a series with 4 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=4, freq='T')
>>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
>>> df = pd.DataFrame({'s':series})
>>> df
s
2000-01-01 00:00:00 0.0
2000-01-01 00:01:00 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:03:00 3.0
Upsample the series into 30 second bins.
>>> df.asfreq(freq='30S')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 NaN
2000-01-01 00:03:00 3.0
Upsample again, providing a ``fill value``.
>>> df.asfreq(freq='30S', fill_value=9.0)
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 9.0
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 9.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 9.0
2000-01-01 00:03:00 3.0
Upsample again, providing a ``method``.
>>> df.asfreq(freq='30S', method='bfill')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 2.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 3.0
2000-01-01 00:03:00 3.0
See Also
--------
reindex
Notes
-----
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
"""
from pandas.core.resample import asfreq
return asfreq(self, freq, method=method, how=how, normalize=normalize,
fill_value=fill_value)
def at_time(self, time, asof=False):
"""
Select values at particular time of day (e.g. 9:30AM).
Parameters
----------
time : datetime.time or string
Returns
-------
values_at_time : type of caller
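        Examples
        --------
        A small illustrative sketch, assuming a hypothetical 12-hourly index:
        >>> i = pd.date_range('2018-04-09', periods=4, freq='12H')
        >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
        >>> ts.at_time('12:00')
                             A
        2018-04-09 12:00:00  2
        2018-04-10 12:00:00  4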
"""
try:
indexer = self.index.indexer_at_time(time, asof=asof)
return self._take(indexer, convert=False)
except AttributeError:
raise TypeError('Index must be DatetimeIndex')
def between_time(self, start_time, end_time, include_start=True,
include_end=True):
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
Parameters
----------
start_time : datetime.time or string
end_time : datetime.time or string
include_start : boolean, default True
include_end : boolean, default True
Returns
-------
values_between_time : type of caller
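        Examples
        --------
        A small illustrative sketch, assuming a hypothetical index spaced one
        day and twenty minutes apart:
        >>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
        >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
        >>> ts.between_time('0:15', '0:45')
                             A
        2018-04-10 00:20:00  2
        2018-04-11 00:40:00  3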
"""
try:
indexer = self.index.indexer_between_time(
start_time, end_time, include_start=include_start,
include_end=include_end)
return self._take(indexer, convert=False)
except AttributeError:
raise TypeError('Index must be DatetimeIndex')
def resample(self, rule, how=None, axis=0, fill_method=None, closed=None,
label=None, convention='start', kind=None, loffset=None,
limit=None, base=0, on=None, level=None):
"""
Convenience method for frequency conversion and resampling of time
series. Object must have a datetime-like index (DatetimeIndex,
PeriodIndex, or TimedeltaIndex), or pass datetime-like values
to the on or level keyword.
Parameters
----------
rule : string
the offset string or object representing target conversion
axis : int, optional, default 0
closed : {'right', 'left'}
Which side of bin interval is closed. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
label : {'right', 'left'}
Which bin edge label to label bucket with. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
convention : {'start', 'end', 's', 'e'}
For PeriodIndex only, controls whether to use the start or end of
`rule`
loffset : timedelta
Adjust the resampled time labels
base : int, default 0
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0
on : string, optional
For a DataFrame, column to use instead of index for resampling.
Column must be datetime-like.
.. versionadded:: 0.19.0
level : string or int, optional
For a MultiIndex, level (name or number) to use for
resampling. Level must be datetime-like.
.. versionadded:: 0.19.0
Notes
-----
To learn more about the offset strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Examples
--------
Start by creating a series with 9 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=9, freq='T')
>>> series = pd.Series(range(9), index=index)
>>> series
2000-01-01 00:00:00 0
2000-01-01 00:01:00 1
2000-01-01 00:02:00 2
2000-01-01 00:03:00 3
2000-01-01 00:04:00 4
2000-01-01 00:05:00 5
2000-01-01 00:06:00 6
2000-01-01 00:07:00 7
2000-01-01 00:08:00 8
Freq: T, dtype: int64
Downsample the series into 3 minute bins and sum the values
of the timestamps falling into a bin.
>>> series.resample('3T').sum()
2000-01-01 00:00:00 3
2000-01-01 00:03:00 12
2000-01-01 00:06:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but label each
bin using the right edge instead of the left. Please note that the
value in the bucket used as the label is not included in the bucket,
which it labels. For example, in the original series the
bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
value in the resampled bucket with the label ``2000-01-01 00:03:00``
does not include 3 (if it did, the summed value would be 6, not 3).
To include this value close the right side of the bin interval as
illustrated in the example below this one.
>>> series.resample('3T', label='right').sum()
2000-01-01 00:03:00 3
2000-01-01 00:06:00 12
2000-01-01 00:09:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> series.resample('3T', label='right', closed='right').sum()
2000-01-01 00:00:00 0
2000-01-01 00:03:00 6
2000-01-01 00:06:00 15
2000-01-01 00:09:00 15
Freq: 3T, dtype: int64
Upsample the series into 30 second bins.
>>> series.resample('30S').asfreq()[0:5] #select first 5 rows
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 1.0
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
Freq: 30S, dtype: float64
Upsample the series into 30 second bins and fill the ``NaN``
values using the ``pad`` method.
>>> series.resample('30S').pad()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 0
2000-01-01 00:01:00 1
2000-01-01 00:01:30 1
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Upsample the series into 30 second bins and fill the
``NaN`` values using the ``bfill`` method.
>>> series.resample('30S').bfill()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 1
2000-01-01 00:01:00 1
2000-01-01 00:01:30 2
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Pass a custom function via ``apply``
>>> def custom_resampler(array_like):
... return np.sum(array_like)+5
>>> series.resample('3T').apply(custom_resampler)
2000-01-01 00:00:00 8
2000-01-01 00:03:00 17
2000-01-01 00:06:00 26
Freq: 3T, dtype: int64
For a Series with a PeriodIndex, the keyword `convention` can be
used to control whether to use the start or end of `rule`.
        >>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',
        ...                                             freq='A',
        ...                                             periods=2))
>>> s
2012 1
2013 2
Freq: A-DEC, dtype: int64
Resample by month using 'start' `convention`. Values are assigned to
the first month of the period.
>>> s.resample('M', convention='start').asfreq().head()
2012-01 1.0
2012-02 NaN
2012-03 NaN
2012-04 NaN
2012-05 NaN
Freq: M, dtype: float64
Resample by month using 'end' `convention`. Values are assigned to
the last month of the period.
>>> s.resample('M', convention='end').asfreq()
2012-12 1.0
2013-01 NaN
2013-02 NaN
2013-03 NaN
2013-04 NaN
2013-05 NaN
2013-06 NaN
2013-07 NaN
2013-08 NaN
2013-09 NaN
2013-10 NaN
2013-11 NaN
2013-12 2.0
Freq: M, dtype: float64
For DataFrame objects, the keyword ``on`` can be used to specify the
column instead of the index for resampling.
>>> df = pd.DataFrame(data=9*[range(4)], columns=['a', 'b', 'c', 'd'])
>>> df['time'] = pd.date_range('1/1/2000', periods=9, freq='T')
>>> df.resample('3T', on='time').sum()
a b c d
time
2000-01-01 00:00:00 0 3 6 9
2000-01-01 00:03:00 0 3 6 9
2000-01-01 00:06:00 0 3 6 9
For a DataFrame with MultiIndex, the keyword ``level`` can be used to
        specify on which level the resampling needs to take place.
>>> time = pd.date_range('1/1/2000', periods=5, freq='T')
        >>> df2 = pd.DataFrame(data=10*[range(4)],
        ...                    columns=['a', 'b', 'c', 'd'],
        ...                    index=pd.MultiIndex.from_product([time, [1, 2]]))
>>> df2.resample('3T', level=0).sum()
a b c d
2000-01-01 00:00:00 0 6 12 18
2000-01-01 00:03:00 0 4 8 12
"""
from pandas.core.resample import (resample,
_maybe_process_deprecations)
axis = self._get_axis_number(axis)
r = resample(self, freq=rule, label=label, closed=closed,
axis=axis, kind=kind, loffset=loffset,
convention=convention,
base=base, key=on, level=level)
return _maybe_process_deprecations(r,
how=how,
fill_method=fill_method,
limit=limit)
def first(self, offset):
"""
Convenience method for subsetting initial periods of time series data
based on a date offset.
Parameters
----------
offset : string, DateOffset, dateutil.relativedelta
Examples
--------
ts.first('10D') -> First 10 days
Returns
-------
subset : type of caller
"""
from pandas.tseries.frequencies import to_offset
if not isinstance(self.index, DatetimeIndex):
raise NotImplementedError("'first' only supports a DatetimeIndex "
"index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
end_date = end = self.index[0] + offset
# Tick-like, e.g. 3 weeks
if not offset.isAnchored() and hasattr(offset, '_inc'):
if end_date in self.index:
end = self.index.searchsorted(end_date, side='left')
return self.iloc[:end]
return self.loc[:end]
def last(self, offset):
"""
Convenience method for subsetting final periods of time series data
based on a date offset.
Parameters
----------
offset : string, DateOffset, dateutil.relativedelta
Examples
--------
ts.last('5M') -> Last 5 months
Returns
-------
subset : type of caller
"""
from pandas.tseries.frequencies import to_offset
if not isinstance(self.index, DatetimeIndex):
raise NotImplementedError("'last' only supports a DatetimeIndex "
"index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
start_date = self.index[-1] - offset
start = self.index.searchsorted(start_date, side='right')
return self.iloc[start:]
def rank(self, axis=0, method='average', numeric_only=None,
na_option='keep', ascending=True, pct=False):
"""
Compute numerical data ranks (1 through n) along axis. Equal values are
assigned a rank that is the average of the ranks of those values
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
index to direct ranking
method : {'average', 'min', 'max', 'first', 'dense'}
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
numeric_only : boolean, default None
Include only float, int, boolean data. Valid only for DataFrame or
Panel objects
na_option : {'keep', 'top', 'bottom'}
* keep: leave NA values where they are
* top: smallest rank if ascending
* bottom: smallest rank if descending
ascending : boolean, default True
False for ranks by high (1) to low (N)
pct : boolean, default False
Computes percentage rank of data
Returns
-------
ranks : same type as caller
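        Examples
        --------
        A small illustrative sketch with made-up values; tied values receive
        the average of their ranks by default:
        >>> s = pd.Series([1, 3, 3, 5])
        >>> s.rank()
        0    1.0
        1    2.5
        2    2.5
        3    4.0
        dtype: float64
        >>> s.rank(method='min')
        0    1.0
        1    2.0
        2    2.0
        3    4.0
        dtype: float64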
"""
axis = self._get_axis_number(axis)
if self.ndim > 2:
msg = "rank does not make sense when ndim > 2"
raise NotImplementedError(msg)
def ranker(data):
ranks = algos.rank(data.values, axis=axis, method=method,
ascending=ascending, na_option=na_option,
pct=pct)
ranks = self._constructor(ranks, **data._construct_axes_dict())
return ranks.__finalize__(self)
# if numeric_only is None, and we can't get anything, we try with
# numeric_only=True
if numeric_only is None:
try:
return ranker(self)
except TypeError:
numeric_only = True
if numeric_only:
data = self._get_numeric_data()
else:
data = self
return ranker(data)
_shared_docs['align'] = ("""
Align two objects on their axes with the
specified join method for each axis Index
Parameters
----------
other : DataFrame or Series
join : {'outer', 'inner', 'left', 'right'}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None)
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level
copy : boolean, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value
method : str, default None
limit : int, default None
fill_axis : %(axes_single_arg)s, default 0
Filling axis, method and limit
broadcast_axis : %(axes_single_arg)s, default None
Broadcast values along this axis, if aligning two objects of
different dimensions
.. versionadded:: 0.17.0
Returns
-------
(left, right) : (%(klass)s, type of other)
Aligned objects
""")
@Appender(_shared_docs['align'] % _shared_doc_kwargs)
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
from pandas import DataFrame, Series
method = missing.clean_fill_method(method)
if broadcast_axis == 1 and self.ndim != other.ndim:
if isinstance(self, Series):
# this means other is a DataFrame, and we need to broadcast
# self
cons = self._constructor_expanddim
df = cons(dict((c, self) for c in other.columns),
**other._construct_axes_dict())
return df._align_frame(other, join=join, axis=axis,
level=level, copy=copy,
fill_value=fill_value, method=method,
limit=limit, fill_axis=fill_axis)
elif isinstance(other, Series):
# this means self is a DataFrame, and we need to broadcast
# other
cons = other._constructor_expanddim
df = cons(dict((c, other) for c in self.columns),
**self._construct_axes_dict())
return self._align_frame(df, join=join, axis=axis, level=level,
copy=copy, fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis)
if axis is not None:
axis = self._get_axis_number(axis)
if isinstance(other, DataFrame):
return self._align_frame(other, join=join, axis=axis, level=level,
copy=copy, fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis)
elif isinstance(other, Series):
return self._align_series(other, join=join, axis=axis, level=level,
copy=copy, fill_value=fill_value,
method=method, limit=limit,
fill_axis=fill_axis)
else: # pragma: no cover
raise TypeError('unsupported type: %s' % type(other))
def _align_frame(self, other, join='outer', axis=None, level=None,
copy=True, fill_value=np.nan, method=None, limit=None,
fill_axis=0):
# defaults
join_index, join_columns = None, None
ilidx, iridx = None, None
clidx, cridx = None, None
is_series = isinstance(self, ABCSeries)
if axis is None or axis == 0:
if not self.index.equals(other.index):
join_index, ilidx, iridx = self.index.join(
other.index, how=join, level=level, return_indexers=True)
if axis is None or axis == 1:
if not is_series and not self.columns.equals(other.columns):
join_columns, clidx, cridx = self.columns.join(
other.columns, how=join, level=level, return_indexers=True)
if is_series:
reindexers = {0: [join_index, ilidx]}
else:
reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}
left = self._reindex_with_indexers(reindexers, copy=copy,
fill_value=fill_value,
allow_dups=True)
# other must be always DataFrame
right = other._reindex_with_indexers({0: [join_index, iridx],
1: [join_columns, cridx]},
copy=copy, fill_value=fill_value,
allow_dups=True)
if method is not None:
left = left.fillna(axis=fill_axis, method=method, limit=limit)
right = right.fillna(axis=fill_axis, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _align_series(self, other, join='outer', axis=None, level=None,
copy=True, fill_value=None, method=None, limit=None,
fill_axis=0):
is_series = isinstance(self, ABCSeries)
# series/series compat, other must always be a Series
if is_series:
if axis:
raise ValueError('cannot align series to a series other than '
'axis 0')
# equal
if self.index.equals(other.index):
join_index, lidx, ridx = None, None, None
else:
join_index, lidx, ridx = self.index.join(other.index, how=join,
level=level,
return_indexers=True)
left = self._reindex_indexer(join_index, lidx, copy)
right = other._reindex_indexer(join_index, ridx, copy)
else:
# one has > 1 ndim
fdata = self._data
if axis == 0:
join_index = self.index
lidx, ridx = None, None
if not self.index.equals(other.index):
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level,
return_indexers=True)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=1)
elif axis == 1:
join_index = self.columns
lidx, ridx = None, None
if not self.columns.equals(other.index):
join_index, lidx, ridx = self.columns.join(
other.index, how=join, level=level,
return_indexers=True)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=0)
else:
raise ValueError('Must specify axis=0 or 1')
if copy and fdata is self._data:
fdata = fdata.copy()
left = self._constructor(fdata)
if ridx is None:
right = other
else:
right = other.reindex(join_index, level=level)
# fill
fill_na = notna(fill_value) or (method is not None)
if fill_na:
left = left.fillna(fill_value, method=method, limit=limit,
axis=fill_axis)
right = right.fillna(fill_value, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_series or (not is_series and axis == 0):
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
errors='raise', try_cast=False):
"""
Equivalent to public method `where`, except that `other` is not
applied as a function even if callable. Used in __setitem__.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
# align the cond to same shape as myself
cond = com._apply_if_callable(cond, self)
if isinstance(cond, NDFrame):
cond, _ = cond.align(self, join='right', broadcast_axis=1)
else:
if not hasattr(cond, 'shape'):
cond = np.asanyarray(cond)
if cond.shape != self.shape:
raise ValueError('Array conditional must be same shape as '
'self')
cond = self._constructor(cond, **self._construct_axes_dict())
# make sure we are boolean
fill_value = True if inplace else False
cond = cond.fillna(fill_value)
msg = "Boolean array expected for the condition, not {dtype}"
if not isinstance(cond, pd.DataFrame):
# This is a single-dimensional object.
if not is_bool_dtype(cond):
raise ValueError(msg.format(dtype=cond.dtype))
else:
for dt in cond.dtypes:
if not is_bool_dtype(dt):
raise ValueError(msg.format(dtype=dt))
cond = cond.astype(bool, copy=False)
cond = -cond if inplace else cond
# try to align with other
try_quick = True
if hasattr(other, 'align'):
# align with me
if other.ndim <= self.ndim:
_, other = self.align(other, join='left', axis=axis,
level=level, fill_value=np.nan)
# if we are NOT aligned, raise as we cannot where index
if (axis is None and
not all([other._get_axis(i).equals(ax)
for i, ax in enumerate(self.axes)])):
raise InvalidIndexError
# slice me out of the other
else:
raise NotImplementedError("cannot align with a higher "
"dimensional NDFrame")
if isinstance(other, np.ndarray):
if other.shape != self.shape:
if self.ndim == 1:
icond = cond.values
# GH 2745 / GH 4192
# treat like a scalar
if len(other) == 1:
other = np.array(other[0])
# GH 3235
# match True cond to other
elif len(cond[icond]) == len(other):
# try to not change dtype at first (if try_quick)
if try_quick:
try:
new_other = _values_from_object(self).copy()
new_other[icond] = other
other = new_other
except:
try_quick = False
# let's create a new (if we failed at the above
# or not try_quick
if not try_quick:
dtype, fill_value = maybe_promote(other.dtype)
new_other = np.empty(len(icond), dtype=dtype)
new_other.fill(fill_value)
maybe_upcast_putmask(new_other, icond, other)
other = new_other
else:
raise ValueError('Length of replacements must equal '
'series length')
else:
raise ValueError('other must be the same shape as self '
'when an ndarray')
# we are the same shape, so create an actual object for alignment
else:
other = self._constructor(other, **self._construct_axes_dict())
if axis is None:
axis = 0
if self.ndim == getattr(other, 'ndim', 0):
align = True
else:
align = (self._get_axis_number(axis) == 1)
block_axis = self._get_block_manager_axis(axis)
if inplace:
# we may have different type blocks come out of putmask, so
# reconstruct the block manager
self._check_inplace_setting(other)
new_data = self._data.putmask(mask=cond, new=other, align=align,
inplace=True, axis=block_axis,
transpose=self._AXIS_REVERSED)
self._update_inplace(new_data)
else:
new_data = self._data.where(other=other, cond=cond, align=align,
errors=errors,
try_cast=try_cast, axis=block_axis,
transpose=self._AXIS_REVERSED)
return self._constructor(new_data).__finalize__(self)
_shared_docs['where'] = ("""
Return an object of same shape as self and whose corresponding
entries are from self where `cond` is %(cond)s and otherwise are from
`other`.
Parameters
----------
cond : boolean %(klass)s, array-like, or callable
Where `cond` is %(cond)s, keep the original value. Where
%(cond_rev)s, replace with corresponding value from `other`.
If `cond` is callable, it is computed on the %(klass)s and
should return boolean %(klass)s or array. The callable must
not change input %(klass)s (though pandas doesn't check it).
.. versionadded:: 0.18.1
A callable can be used as cond.
other : scalar, %(klass)s, or callable
Entries where `cond` is %(cond_rev)s are replaced with
corresponding value from `other`.
If other is callable, it is computed on the %(klass)s and
should return scalar or %(klass)s. The callable must not
change input %(klass)s (though pandas doesn't check it).
.. versionadded:: 0.18.1
A callable can be used as other.
inplace : boolean, default False
Whether to perform the operation in place on the data
axis : alignment axis if needed, default None
level : alignment level if needed, default None
errors : str, {'raise', 'ignore'}, default 'raise'
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
Note that currently this parameter won't affect
the results and will always coerce to a suitable dtype.
try_cast : boolean, default False
try to cast the result back to the input type (if possible),
raise_on_error : boolean, default True
Whether to raise on invalid data types (e.g. trying to where on
strings)
.. deprecated:: 0.21.0
Returns
-------
wh : same type as caller
Notes
-----
The %(name)s method is an application of the if-then idiom. For each
element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the
element is used; otherwise the corresponding element from the DataFrame
``other`` is used.
The signature for :func:`DataFrame.where` differs from
:func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
``np.where(m, df1, df2)``.
For further details and examples see the ``%(name)s`` documentation in
:ref:`indexing <indexing.where_mask>`.
Examples
--------
>>> s = pd.Series(range(5))
>>> s.where(s > 0)
0 NaN
1 1.0
2 2.0
3 3.0
4 4.0
>>> s.mask(s > 0)
0 0.0
1 NaN
2 NaN
3 NaN
4 NaN
>>> s.where(s > 1, 10)
0 10.0
1 10.0
2 2.0
3 3.0
4 4.0
>>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
>>> m = df %% 3 == 0
>>> df.where(m, -df)
A B
0 0 -1
1 -2 3
2 -4 -5
3 6 -7
4 -8 9
>>> df.where(m, -df) == np.where(m, df, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
>>> df.where(m, -df) == df.mask(~m, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
See Also
--------
:func:`DataFrame.%(name_other)s`
""")
@Appender(_shared_docs['where'] % dict(_shared_doc_kwargs, cond="True",
cond_rev="False", name='where',
name_other='mask'))
def where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
errors='raise', try_cast=False, raise_on_error=None):
if raise_on_error is not None:
warnings.warn(
"raise_on_error is deprecated in "
"favor of errors='raise|ignore'",
FutureWarning, stacklevel=2)
if raise_on_error:
errors = 'raise'
else:
errors = 'ignore'
other = com._apply_if_callable(other, self)
return self._where(cond, other, inplace, axis, level,
errors=errors, try_cast=try_cast)
@Appender(_shared_docs['where'] % dict(_shared_doc_kwargs, cond="False",
cond_rev="True", name='mask',
name_other='where'))
def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None,
errors='raise', try_cast=False, raise_on_error=None):
if raise_on_error is not None:
warnings.warn(
"raise_on_error is deprecated in "
"favor of errors='raise|ignore'",
FutureWarning, stacklevel=2)
if raise_on_error:
errors = 'raise'
else:
errors = 'ignore'
inplace = validate_bool_kwarg(inplace, 'inplace')
cond = com._apply_if_callable(cond, self)
return self.where(~cond, other=other, inplace=inplace, axis=axis,
level=level, try_cast=try_cast,
errors=errors)
_shared_docs['shift'] = ("""
Shift index by desired number of periods with an optional time freq
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, optional
Increment to use from the tseries module or time rule (e.g. 'EOM').
See Notes.
axis : %(axes_single_arg)s
Notes
-----
If freq is specified then the index values are shifted but the data
is not realigned. That is, use freq if you would like to extend the
index when shifting and preserve the original data.
Returns
-------
shifted : %(klass)s
""")
@Appender(_shared_docs['shift'] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0):
if periods == 0:
return self
block_axis = self._get_block_manager_axis(axis)
if freq is None:
new_data = self._data.shift(periods=periods, axis=block_axis)
else:
return self.tshift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def slice_shift(self, periods=1, axis=0):
"""
Equivalent to `shift` without copying data. The shifted data will
not include the dropped periods and the shifted axis will be smaller
than the original.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Notes
-----
While the `slice_shift` is faster than `shift`, you may pay for it
later during alignment.
Returns
-------
shifted : same type as caller
"""
if periods == 0:
return self
if periods > 0:
vslicer = slice(None, -periods)
islicer = slice(periods, None)
else:
vslicer = slice(-periods, None)
islicer = slice(None, periods)
new_obj = self._slice(vslicer, axis=axis)
shifted_axis = self._get_axis(axis)[islicer]
new_obj.set_axis(shifted_axis, axis=axis, inplace=True)
return new_obj.__finalize__(self)
def tshift(self, periods=1, freq=None, axis=0):
"""
Shift the time index, using the index's frequency if available.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, default None
Increment to use from the tseries module or time rule (e.g. 'EOM')
axis : int or basestring
Corresponds to the axis that contains the Index
Notes
-----
If freq is not specified then tries to use the freq or inferred_freq
attributes of the index. If neither of those attributes exist, a
ValueError is thrown
Returns
-------
shifted : NDFrame
"""
index = self._get_axis(axis)
if freq is None:
freq = getattr(index, 'freq', None)
if freq is None:
freq = getattr(index, 'inferred_freq', None)
if freq is None:
msg = 'Freq was not given and was not set in the index'
raise ValueError(msg)
if periods == 0:
return self
if isinstance(freq, string_types):
freq = to_offset(freq)
block_axis = self._get_block_manager_axis(axis)
if isinstance(index, PeriodIndex):
orig_freq = to_offset(index.freq)
if freq == orig_freq:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
else:
msg = ('Given freq %s does not match PeriodIndex freq %s' %
(freq.rule_code, orig_freq.rule_code))
raise ValueError(msg)
else:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def truncate(self, before=None, after=None, axis=None, copy=True):
"""
Truncates a sorted DataFrame/Series before and/or after some
particular index value. If the axis contains only datetime values,
before/after parameters are converted to datetime values.
Parameters
----------
before : date, string, int
Truncate all rows before this index value
after : date, string, int
Truncate all rows after this index value
axis : {0 or 'index', 1 or 'columns'}
* 0 or 'index': apply truncation to rows
* 1 or 'columns': apply truncation to columns
Default is stat axis for given data type (0 for Series and
DataFrames, 1 for Panels)
        copy : boolean, default True
            Return a copy of the truncated section.
Returns
-------
truncated : type of caller
Examples
--------
>>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
>>> df = pd.DataFrame({'A': [1, 2, 3, 4, 5],
... 'B': [6, 7, 8, 9, 10],
... 'C': [11, 12, 13, 14, 15]},
... index=['a', 'b', 'c', 'd', 'e'])
>>> df.truncate(before='b', after='d')
A B C
b 2 7 12
c 3 8 13
d 4 9 14
The index values in ``truncate`` can be datetimes or string
dates. Note that ``truncate`` assumes a 0 value for any unspecified
date component in a ``DatetimeIndex`` in contrast to slicing which
returns any partially matching dates.
>>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
>>> df = pd.DataFrame(index=dates, data={'A': 1})
>>> df.truncate('2016-01-05', '2016-01-10').tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
>>> df.loc['2016-01-05':'2016-01-10', :].tail()
A
2016-01-10 23:59:55 1
2016-01-10 23:59:56 1
2016-01-10 23:59:57 1
2016-01-10 23:59:58 1
2016-01-10 23:59:59 1
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
# if we have a date index, convert to dates, otherwise
# treat like a slice
if ax.is_all_dates:
from pandas.core.tools.datetimes import to_datetime
before = to_datetime(before)
after = to_datetime(after)
if before is not None and after is not None:
if before > after:
raise ValueError('Truncate: %s must be after %s' %
(after, before))
slicer = [slice(None, None)] * self._AXIS_LEN
slicer[axis] = slice(before, after)
result = self.loc[tuple(slicer)]
if isinstance(ax, MultiIndex):
setattr(result, self._get_axis_name(axis),
ax.truncate(before, after))
if copy:
result = result.copy()
return result
def tz_convert(self, tz, axis=0, level=None, copy=True):
"""
Convert tz-aware axis to target time zone.
Parameters
----------
tz : string or pytz.timezone object
axis : the axis to convert
level : int, str, default None
            If axis is a MultiIndex, convert a specific level. Otherwise
must be None
copy : boolean, default True
Also make a copy of the underlying data
Returns
-------
Raises
------
TypeError
If the axis is tz-naive.
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_convert(ax, tz):
if not hasattr(ax, 'tz_convert'):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError('%s is not a valid DatetimeIndex or '
'PeriodIndex' % ax_name)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_convert(tz)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_convert(ax.levels[level], tz)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError("The level {0} is not valid".format(level))
ax = _tz_convert(ax, tz)
result = self._constructor(self._data, copy=copy)
result.set_axis(ax, axis=axis, inplace=True)
return result.__finalize__(self)
@deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
mapping={True: 'infer',
False: 'raise'})
def tz_localize(self, tz, axis=0, level=None, copy=True,
ambiguous='raise'):
"""
Localize tz-naive TimeSeries to target time zone.
Parameters
----------
tz : string or pytz.timezone object
axis : the axis to localize
level : int, str, default None
            If axis is a MultiIndex, localize a specific level. Otherwise
must be None
copy : boolean, default True
Also make a copy of the underlying data
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times
infer_dst : boolean, default False
.. deprecated:: 0.15.0
Attempt to infer fall dst-transition hours based on order
Returns
-------
Raises
------
TypeError
If the TimeSeries is tz-aware and tz is not None.
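        Examples
        --------
        A small illustrative sketch with a hypothetical naive timestamp
        (CET observes daylight saving time in September):
        >>> s = pd.Series([1], index=pd.DatetimeIndex(['2018-09-15 01:30:00']))
        >>> s.tz_localize('CET')
        2018-09-15 01:30:00+02:00    1
        dtype: int64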
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_localize(ax, tz, ambiguous):
if not hasattr(ax, 'tz_localize'):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError('%s is not a valid DatetimeIndex or '
'PeriodIndex' % ax_name)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_localize(tz, ambiguous=ambiguous)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_localize(ax.levels[level], tz, ambiguous)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError("The level {0} is not valid".format(level))
ax = _tz_localize(ax, tz, ambiguous)
result = self._constructor(self._data, copy=copy)
result.set_axis(ax, axis=axis, inplace=True)
return result.__finalize__(self)
# ----------------------------------------------------------------------
# Numeric Methods
def abs(self):
"""
Return an object with absolute value taken--only applicable to objects
that are all numeric.
Returns
-------
abs: type of caller
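        Examples
        --------
        A small illustrative sketch with made-up values:
        >>> pd.Series([-1.5, 2, -3]).abs()
        0    1.5
        1    2.0
        2    3.0
        dtype: float64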
"""
return np.abs(self)
def describe(self, percentiles=None, include=None, exclude=None):
"""
Generates descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding
``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list-like of numbers, optional
The percentiles to include in the output. All should
fall between 0 and 1. The default is
``[.25, .5, .75]``, which returns the 25th, 50th, and
75th percentiles.
include : 'all', list-like of dtypes or None (default), optional
A white list of data types to include in the result. Ignored
for ``Series``. Here are the options:
- 'all' : All columns of the input will be included in the output.
- A list-like of dtypes : Limits the results to the
provided data types.
To limit the result to numeric types submit
``numpy.number``. To limit it instead to object columns submit
the ``numpy.object`` data type. Strings
can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
select pandas categorical columns, use ``'category'``
- None (default) : The result will include all numeric columns.
exclude : list-like of dtypes or None (default), optional,
A black list of data types to omit from the result. Ignored
for ``Series``. Here are the options:
- A list-like of dtypes : Excludes the provided data types
from the result. To exclude numeric types submit
``numpy.number``. To exclude object columns submit the data
type ``numpy.object``. Strings can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
exclude pandas categorical columns, use ``'category'``
- None (default) : The result will exclude nothing.
Returns
-------
summary: Series/DataFrame of summary statistics
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and
upper percentiles. By default the lower percentile is ``25`` and the
upper percentile is ``75``. The ``50`` percentile is the
same as the median.
For object data (e.g. strings or timestamps), the result's index
will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``
is the most common value. The ``freq`` is the most common value's
frequency. Timestamps also include the ``first`` and ``last`` items.
If multiple object values have the highest count, then the
``count`` and ``top`` results will be arbitrarily chosen from
among those with the highest count.
For mixed data types provided via a ``DataFrame``, the default is to
return only an analysis of numeric columns. If the dataframe consists
only of object and categorical data without any numeric columns, the
default is to return an analysis of both the object and categorical
columns. If ``include='all'`` is provided as an option, the result
will include a union of attributes of each type.
The `include` and `exclude` parameters can be used to limit
which columns in a ``DataFrame`` are analyzed for the output.
The parameters are ignored when analyzing a ``Series``.
Examples
--------
Describing a numeric ``Series``.
>>> s = pd.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Describing a categorical ``Series``.
>>> s = pd.Series(['a', 'a', 'b', 'c'])
>>> s.describe()
count 4
unique 3
top a
freq 2
dtype: object
Describing a timestamp ``Series``.
>>> s = pd.Series([
... np.datetime64("2000-01-01"),
... np.datetime64("2010-01-01"),
... np.datetime64("2010-01-01")
... ])
>>> s.describe()
count 3
unique 2
top 2010-01-01 00:00:00
freq 2
first 2000-01-01 00:00:00
last 2010-01-01 00:00:00
dtype: object
Describing a ``DataFrame``. By default only numeric fields
are returned.
>>> df = pd.DataFrame({ 'object': ['a', 'b', 'c'],
... 'numeric': [1, 2, 3],
... 'categorical': pd.Categorical(['d','e','f'])
... })
>>> df.describe()
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Describing all columns of a ``DataFrame`` regardless of data type.
>>> df.describe(include='all')
categorical numeric object
count 3 3.0 3
unique 3 NaN 3
top f NaN c
freq 1 NaN 1
mean NaN 2.0 NaN
std NaN 1.0 NaN
min NaN 1.0 NaN
25% NaN 1.5 NaN
50% NaN 2.0 NaN
75% NaN 2.5 NaN
max NaN 3.0 NaN
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Name: numeric, dtype: float64
Including only numeric columns in a ``DataFrame`` description.
>>> df.describe(include=[np.number])
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Including only string columns in a ``DataFrame`` description.
>>> df.describe(include=[np.object])
object
count 3
unique 3
top c
freq 1
Including only categorical columns from a ``DataFrame`` description.
>>> df.describe(include=['category'])
categorical
count 3
unique 3
top f
freq 1
Excluding numeric columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.number])
categorical object
count 3 3
unique 3 3
top f c
freq 1 1
Excluding object columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.object])
categorical numeric
count 3 3.0
unique 3 NaN
top f NaN
freq 1 NaN
mean NaN 2.0
std NaN 1.0
min NaN 1.0
25% NaN 1.5
50% NaN 2.0
75% NaN 2.5
max NaN 3.0
See Also
--------
DataFrame.count
DataFrame.max
DataFrame.min
DataFrame.mean
DataFrame.std
DataFrame.select_dtypes
"""
if self.ndim >= 3:
msg = "describe is not implemented on Panel or PanelND objects."
raise NotImplementedError(msg)
elif self.ndim == 2 and self.columns.size == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
# explicit conversion of `percentiles` to list
percentiles = list(percentiles)
# get them all to be in [0, 1]
self._check_percentile(percentiles)
# median should always be included
if 0.5 not in percentiles:
percentiles.append(0.5)
percentiles = np.asarray(percentiles)
else:
percentiles = np.array([0.25, 0.5, 0.75])
# sort and check for duplicates
unique_pcts = np.unique(percentiles)
if len(unique_pcts) < len(percentiles):
raise ValueError("percentiles cannot contain duplicates")
percentiles = unique_pcts
formatted_percentiles = format_percentiles(percentiles)
def describe_numeric_1d(series):
stat_index = (['count', 'mean', 'std', 'min'] +
formatted_percentiles + ['max'])
d = ([series.count(), series.mean(), series.std(), series.min()] +
[series.quantile(x) for x in percentiles] + [series.max()])
return pd.Series(d, index=stat_index, name=series.name)
def describe_categorical_1d(data):
names = ['count', 'unique']
objcounts = data.value_counts()
count_unique = len(objcounts[objcounts != 0])
result = [data.count(), count_unique]
if result[1] > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
if is_datetime64_dtype(data):
asint = data.dropna().values.view('i8')
names += ['top', 'freq', 'first', 'last']
result += [lib.Timestamp(top), freq,
lib.Timestamp(asint.min()),
lib.Timestamp(asint.max())]
else:
names += ['top', 'freq']
result += [top, freq]
return pd.Series(result, index=names, name=data.name)
def describe_1d(data):
if is_bool_dtype(data):
return describe_categorical_1d(data)
elif is_numeric_dtype(data):
return describe_numeric_1d(data)
elif is_timedelta64_dtype(data):
return describe_numeric_1d(data)
else:
return describe_categorical_1d(data)
if self.ndim == 1:
return describe_1d(self)
elif (include is None) and (exclude is None):
# when some numerics are found, keep only numerics
data = self.select_dtypes(include=[np.number])
if len(data.columns) == 0:
data = self
elif include == 'all':
if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self
else:
data = self.select_dtypes(include=include, exclude=exclude)
ldesc = [describe_1d(s) for _, s in data.iteritems()]
# set a convenient order for rows
names = []
ldesc_indexes = sorted([x.index for x in ldesc], key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
d = pd.concat(ldesc, join_axes=pd.Index([names]), axis=1)
d.columns = data.columns.copy()
return d
def _check_percentile(self, q):
"""Validate percentiles (used by describe and quantile)."""
msg = ("percentiles should all be in the interval [0, 1]. "
"Try {0} instead.")
q = np.asarray(q)
if q.ndim == 0:
if not 0 <= q <= 1:
raise ValueError(msg.format(q / 100.0))
else:
if not all(0 <= qs <= 1 for qs in q):
raise ValueError(msg.format(q / 100.0))
return q
_shared_docs['pct_change'] = """
Percent change over given number of periods.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change
fill_method : str, default 'pad'
How to handle NAs before computing percent changes
limit : int, default None
The number of consecutive NAs to fill before stopping
freq : DateOffset, timedelta, or offset alias string, optional
Increment to use from time series API (e.g. 'M' or BDay())
Returns
-------
chg : %(klass)s
Notes
-----
By default, the percentage change is calculated along the stat
axis: 0, or ``Index``, for ``DataFrame`` and 1, or ``minor`` for
``Panel``. You can change this with the ``axis`` keyword argument.
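    Examples
    --------
    A small illustrative sketch with made-up values:
    >>> s = pd.Series([90, 91, 85])
    >>> s.pct_change()
    0         NaN
    1    0.011111
    2   -0.065934
    dtype: float64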
"""
@Appender(_shared_docs['pct_change'] % _shared_doc_kwargs)
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
**kwargs):
# TODO: Not sure if above is correct - need someone to confirm.
axis = self._get_axis_number(kwargs.pop('axis', self._stat_axis_name))
if fill_method is None:
data = self
else:
data = self.fillna(method=fill_method, limit=limit, axis=axis)
rs = (data.div(data.shift(periods=periods, freq=freq, axis=axis,
**kwargs)) - 1)
if freq is None:
mask = isna(_values_from_object(self))
np.putmask(rs.values, mask, np.nan)
return rs
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
grouped = self.groupby(level=level, axis=axis, sort=False)
if hasattr(grouped, name) and skipna:
return getattr(grouped, name)(**kwargs)
axis = self._get_axis_number(axis)
method = getattr(type(self), name)
applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
return grouped.aggregate(applyf)
@classmethod
def _add_numeric_operations(cls):
"""Add the operations to the cls; evaluate the doc strings again"""
axis_descr, name, name2 = _doc_parms(cls)
cls.any = _make_logical_function(
cls, 'any', name, name2, axis_descr,
'Return whether any element is True over requested axis',
nanops.nanany)
cls.all = _make_logical_function(
cls, 'all', name, name2, axis_descr,
'Return whether all elements are True over requested axis',
nanops.nanall)
@Substitution(outname='mad',
desc="Return the mean absolute deviation of the values "
"for the requested axis",
name1=name, name2=name2, axis_descr=axis_descr)
@Appender(_num_doc)
def mad(self, axis=None, skipna=None, level=None):
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level('mad', axis=axis, level=level,
skipna=skipna)
data = self._get_numeric_data()
if axis == 0:
demeaned = data - data.mean(axis=0)
else:
demeaned = data.sub(data.mean(axis=1), axis=0)
return np.abs(demeaned).mean(axis=axis, skipna=skipna)
cls.mad = mad
cls.sem = _make_stat_function_ddof(
cls, 'sem', name, name2, axis_descr,
"Return unbiased standard error of the mean over requested "
"axis.\n\nNormalized by N-1 by default. This can be changed "
"using the ddof argument",
nanops.nansem)
cls.var = _make_stat_function_ddof(
cls, 'var', name, name2, axis_descr,
"Return unbiased variance over requested axis.\n\nNormalized by "
"N-1 by default. This can be changed using the ddof argument",
nanops.nanvar)
cls.std = _make_stat_function_ddof(
cls, 'std', name, name2, axis_descr,
"Return sample standard deviation over requested axis."
"\n\nNormalized by N-1 by default. This can be changed using the "
"ddof argument",
nanops.nanstd)
@Substitution(outname='compounded',
desc="Return the compound percentage of the values for "
"the requested axis", name1=name, name2=name2,
axis_descr=axis_descr)
@Appender(_num_doc)
def compound(self, axis=None, skipna=None, level=None):
if skipna is None:
skipna = True
return (1 + self).prod(axis=axis, skipna=skipna, level=level) - 1
cls.compound = compound
cls.cummin = _make_cum_function(
cls, 'cummin', name, name2, axis_descr, "cumulative minimum",
lambda y, axis: np.minimum.accumulate(y, axis), "min",
np.inf, np.nan)
cls.cumsum = _make_cum_function(
cls, 'cumsum', name, name2, axis_descr, "cumulative sum",
lambda y, axis: y.cumsum(axis), "sum", 0., np.nan)
cls.cumprod = _make_cum_function(
cls, 'cumprod', name, name2, axis_descr, "cumulative product",
lambda y, axis: y.cumprod(axis), "prod", 1., np.nan)
cls.cummax = _make_cum_function(
cls, 'cummax', name, name2, axis_descr, "cumulative max",
lambda y, axis: np.maximum.accumulate(y, axis), "max",
-np.inf, np.nan)
cls.sum = _make_stat_function(
cls, 'sum', name, name2, axis_descr,
'Return the sum of the values for the requested axis',
nanops.nansum)
cls.mean = _make_stat_function(
cls, 'mean', name, name2, axis_descr,
'Return the mean of the values for the requested axis',
nanops.nanmean)
cls.skew = _make_stat_function(
cls, 'skew', name, name2, axis_descr,
'Return unbiased skew over requested axis\nNormalized by N-1',
nanops.nanskew)
cls.kurt = _make_stat_function(
cls, 'kurt', name, name2, axis_descr,
"Return unbiased kurtosis over requested axis using Fisher's "
"definition of\nkurtosis (kurtosis of normal == 0.0). Normalized "
"by N-1\n",
nanops.nankurt)
cls.kurtosis = cls.kurt
cls.prod = _make_stat_function(
cls, 'prod', name, name2, axis_descr,
'Return the product of the values for the requested axis',
nanops.nanprod)
cls.product = cls.prod
cls.median = _make_stat_function(
cls, 'median', name, name2, axis_descr,
'Return the median of the values for the requested axis',
nanops.nanmedian)
cls.max = _make_stat_function(
cls, 'max', name, name2, axis_descr,
"""This method returns the maximum of the values in the object.
If you want the *index* of the maximum, use ``idxmax``. This is
the equivalent of the ``numpy.ndarray`` method ``argmax``.""",
nanops.nanmax)
cls.min = _make_stat_function(
cls, 'min', name, name2, axis_descr,
"""This method returns the minimum of the values in the object.
If you want the *index* of the minimum, use ``idxmin``. This is
the equivalent of the ``numpy.ndarray`` method ``argmin``.""",
nanops.nanmin)
@classmethod
def _add_series_only_operations(cls):
"""Add the series only operations to the cls; evaluate the doc
strings again.
"""
axis_descr, name, name2 = _doc_parms(cls)
def nanptp(values, axis=0, skipna=True):
nmax = nanops.nanmax(values, axis, skipna)
nmin = nanops.nanmin(values, axis, skipna)
return nmax - nmin
cls.ptp = _make_stat_function(
cls, 'ptp', name, name2, axis_descr,
"""Returns the difference between the maximum value and the
minimum value in the object. This is the equivalent of the
``numpy.ndarray`` method ``ptp``.""",
nanptp)
@classmethod
def _add_series_or_dataframe_operations(cls):
"""Add the series or dataframe only operations to the cls; evaluate
the doc strings again.
"""
from pandas.core import window as rwindow
@Appender(rwindow.rolling.__doc__)
def rolling(self, window, min_periods=None, freq=None, center=False,
win_type=None, on=None, axis=0, closed=None):
axis = self._get_axis_number(axis)
return rwindow.rolling(self, window=window,
min_periods=min_periods, freq=freq,
center=center, win_type=win_type,
on=on, axis=axis, closed=closed)
cls.rolling = rolling
@Appender(rwindow.expanding.__doc__)
def expanding(self, min_periods=1, freq=None, center=False, axis=0):
axis = self._get_axis_number(axis)
return rwindow.expanding(self, min_periods=min_periods, freq=freq,
center=center, axis=axis)
cls.expanding = expanding
@Appender(rwindow.ewm.__doc__)
def ewm(self, com=None, span=None, halflife=None, alpha=None,
min_periods=0, freq=None, adjust=True, ignore_na=False,
axis=0):
axis = self._get_axis_number(axis)
return rwindow.ewm(self, com=com, span=span, halflife=halflife,
alpha=alpha, min_periods=min_periods, freq=freq,
adjust=adjust, ignore_na=ignore_na, axis=axis)
cls.ewm = ewm
@Appender(_shared_docs['transform'] % _shared_doc_kwargs)
def transform(self, func, *args, **kwargs):
result = self.agg(func, *args, **kwargs)
if is_scalar(result) or len(result) != len(self):
raise ValueError("transforms cannot produce "
"aggregated results")
return result
cls.transform = transform
# ----------------------------------------------------------------------
# Misc methods
_shared_docs['valid_index'] = """
Return index for %(position)s non-NA/null value.
    Notes
    -----
    If all elements are non-NA/null, returns None.
    Also returns None for empty %(klass)s.
    Returns
    -------
    scalar : type of index
"""
def _doc_parms(cls):
"""Return a tuple of the doc parms."""
axis_descr = "{%s}" % ', '.join(["{0} ({1})".format(a, i)
for i, a in enumerate(cls._AXIS_ORDERS)])
name = (cls._constructor_sliced.__name__
if cls._AXIS_LEN > 1 else 'scalar')
name2 = cls.__name__
return axis_descr, name, name2
_num_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA or empty, the result
will be NA
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s
numeric_only : boolean, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
%(outname)s : %(name1)s or %(name2)s (if level specified)\n"""
_num_ddof_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s
ddof : int, default 1
degrees of freedom
numeric_only : boolean, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
%(outname)s : %(name1)s or %(name2)s (if level specified)\n"""
_bool_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s
bool_only : boolean, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data. Not implemented for Series.
Returns
-------
%(outname)s : %(name1)s or %(name2)s (if level specified)\n"""
_cnum_doc = """
Parameters
----------
axis : %(axis_descr)s
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
Returns
-------
%(outname)s : %(name1)s\n
See also
--------
pandas.core.window.Expanding.%(accum_func_name)s : Similar functionality
but ignores ``NaN`` values.
"""
def _make_stat_function(cls, name, name1, name2, axis_descr, desc, f):
@Substitution(outname=name, desc=desc, name1=name1, name2=name2,
axis_descr=axis_descr)
@Appender(_num_doc)
def stat_func(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level,
skipna=skipna)
return self._reduce(f, name, axis=axis, skipna=skipna,
numeric_only=numeric_only)
return set_function_name(stat_func, name, cls)
def _make_stat_function_ddof(cls, name, name1, name2, axis_descr, desc, f):
@Substitution(outname=name, desc=desc, name1=name1, name2=name2,
axis_descr=axis_descr)
@Appender(_num_ddof_doc)
def stat_func(self, axis=None, skipna=None, level=None, ddof=1,
numeric_only=None, **kwargs):
nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level,
skipna=skipna, ddof=ddof)
return self._reduce(f, name, axis=axis, numeric_only=numeric_only,
skipna=skipna, ddof=ddof)
return set_function_name(stat_func, name, cls)
def _make_cum_function(cls, name, name1, name2, axis_descr, desc,
accum_func, accum_func_name, mask_a, mask_b):
@Substitution(outname=name, desc=desc, name1=name1, name2=name2,
axis_descr=axis_descr, accum_func_name=accum_func_name)
@Appender("Return {0} over requested axis.".format(desc) +
_cnum_doc)
def cum_func(self, axis=None, skipna=True, *args, **kwargs):
skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
if axis is None:
axis = self._stat_axis_number
else:
axis = self._get_axis_number(axis)
y = _values_from_object(self).copy()
if (skipna and
issubclass(y.dtype.type, (np.datetime64, np.timedelta64))):
result = accum_func(y, axis)
mask = isna(self)
np.putmask(result, mask, tslib.iNaT)
elif skipna and not issubclass(y.dtype.type, (np.integer, np.bool_)):
mask = isna(self)
np.putmask(y, mask, mask_a)
result = accum_func(y, axis)
np.putmask(result, mask, mask_b)
else:
result = accum_func(y, axis)
d = self._construct_axes_dict()
d['copy'] = False
return self._constructor(result, **d).__finalize__(self)
return set_function_name(cum_func, name, cls)
def _make_logical_function(cls, name, name1, name2, axis_descr, desc, f):
@Substitution(outname=name, desc=desc, name1=name1, name2=name2,
axis_descr=axis_descr)
@Appender(_bool_doc)
def logical_func(self, axis=None, bool_only=None, skipna=None, level=None,
**kwargs):
nv.validate_logical_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
if bool_only is not None:
raise NotImplementedError("Option bool_only is not "
"implemented with option level.")
return self._agg_by_level(name, axis=axis, level=level,
skipna=skipna)
return self._reduce(f, axis=axis, skipna=skipna,
numeric_only=bool_only, filter_type='bool',
name=name)
return set_function_name(logical_func, name, cls)
# install the indexes
for _name, _indexer in indexing.get_indexers_list():
NDFrame._create_indexer(_name, _indexer)
| apache-2.0 | -3,662,178,750,696,111,000 | 35.245967 | 102 | 0.523868 | false |
5agado/data-science-learning | graphics/heartbeat/main.py | 1 | 3265 | import bpy
from bpy.app.handlers import persistent
import numpy as np
import pandas as pd
# Blender import system clutter
import sys
from pathlib import Path
SRC_PATH = Path.home() / "Documents/python_workspace/data-science-learning/graphics/heartbeat"
UTILS_PATH = Path.home() / "Documents/python_workspace/data-science-learning"
sys.path.append(str(SRC_PATH))
sys.path.append(str(UTILS_PATH))
import heartbeat_utils
import importlib
importlib.reload(heartbeat_utils)
def duplicate_object(target, scene, material=None):
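    """Duplicate target (object and mesh data), optionally assigning a copy of material, and link the duplicate into the scene collection."""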
new_obj = target.copy()
new_obj.data = target.data.copy()
new_obj.animation_data_clear()
if material:
new_obj.active_material = material.copy()
scene.collection.objects.link(new_obj)
return new_obj
def create_grid(rows, cols):
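    """Create a rows x cols grid of cube/light pairs and return a nested list holding each cell's cube, light and emission input."""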
obj_grid = []
scene = bpy.context.scene
distance_factor = 4
z = 1
for i in range(rows):
row = []
for j in range(cols):
cur_location = (i*distance_factor, j*distance_factor, z)
cube = bpy.data.objects['base_cube']
light = bpy.data.objects['internal_cube_light']
cube_copy = duplicate_object(cube, scene)
light_copy = duplicate_object(light, scene, material=light.active_material)
cube_copy.location = cur_location
light_copy.location = cur_location
emission = light_copy.active_material.node_tree.nodes.get('Emission').inputs[1]
row.append({'cube': cube_copy, 'light': light_copy, 'emission': emission})
obj_grid.append(row)
return obj_grid
def update_time_text(scene, test_data, time_text):
time = test_data['index'].loc[scene.frame_current][10:16]
time_text.data.body = time
def min_max_norm(vals: pd.Series, min_val=0., max_val=1.):
return min_val + (((vals - vals.min()) * (max_val - min_val)) / (vals.max() - vals.min()))
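# A minimal usage sketch of min_max_norm, assuming a small made-up Series
# (pd is imported at the top of this script): values are rescaled linearly
# onto [min_val, max_val].
def _min_max_norm_example():
    sample = pd.Series([50.0, 75.0, 100.0])
    # Expected result: 0.0, 1.0, 2.0 after rescaling onto [0, 2].
    return min_max_norm(sample, min_val=0., max_val=2.)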
def main():
DAYS = 7
WEEKS = 5
NB_VALUES = 60*24 # minutes per day
NUM_FRAMES = NB_VALUES
bpy.context.scene.frame_start = 0
bpy.context.scene.frame_end = NUM_FRAMES
# Get HB data
test_data = heartbeat_utils.load_test_data(str(Path.home() / "test_hb.csv"))
# Create grid and base text
obj_grid = create_grid(WEEKS, DAYS)
# Setup text
date_text = bpy.context.scene.objects['text_date']
date_text.data.body = test_data['index'].loc[len(test_data)//2][:7]
time_text = bpy.context.scene.objects['text_time']
@persistent
def update_time_handler(scene):
update_time_text(scene, test_data, time_text)
bpy.app.handlers.frame_change_pre.append(update_time_handler)
# Normalize HB data
min_val = 0.
max_val = 2.
test_data['value'] = min_max_norm(test_data['value'], min_val=min_val, max_val=max_val)
# Animate
for t in range(NB_VALUES):
if t % 100 == 0:
print("Updating frame {}".format(t))
bpy.context.scene.frame_set(t)
for k in range(WEEKS):
for j in range(DAYS):
cur_emission = obj_grid[k][j]['emission']
cur_emission.default_value = test_data.loc[t+((DAYS*k+j)*NB_VALUES)].value
cur_emission.keyframe_insert("default_value", frame=t)
bpy.context.scene.frame_set(0)
main()
| apache-2.0 | -8,655,585,295,226,702,000 | 30.095238 | 94 | 0.637366 | false |
jasondunsmore/heat | heat/tests/test_rpc_client.py | 1 | 16456 | #
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for heat.rpc.client
"""
import copy
import mock
from mox import stubout
from oslo_messaging._drivers import common as rpc_common
from oslo_utils import reflection
from heat.common import exception
from heat.common import identifier
from heat.rpc import client as rpc_client
from heat.tests import common
from heat.tests import utils
class EngineRpcAPITestCase(common.HeatTestCase):
def setUp(self):
super(EngineRpcAPITestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.identity = dict(identifier.HeatIdentifier('engine_test_tenant',
'6',
'wordpress'))
self.rpcapi = rpc_client.EngineClient()
def _to_remote_error(self, error):
"""Converts the given exception to the one with the _Remote suffix."""
exc_info = (type(error), error, None)
serialized = rpc_common.serialize_remote_exception(exc_info)
remote_error = rpc_common.deserialize_remote_exception(
serialized, ["heat.common.exception"])
return remote_error
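    # For illustration: converting exception.NotFound() through
    # _to_remote_error() yields an exception whose class name carries the
    # '_Remote' suffix ('NotFound_Remote'), which local_error_name() maps
    # back to plain 'NotFound', as the tests below verify.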
def test_local_error_name(self):
ex = exception.NotFound()
self.assertEqual('NotFound', self.rpcapi.local_error_name(ex))
exr = self._to_remote_error(ex)
self.assertEqual('NotFound_Remote',
reflection.get_class_name(exr, fully_qualified=False))
self.assertEqual('NotFound', self.rpcapi.local_error_name(exr))
def test_ignore_error_named(self):
ex = exception.NotFound()
exr = self._to_remote_error(ex)
self.rpcapi.ignore_error_named(ex, 'NotFound')
self.rpcapi.ignore_error_named(exr, 'NotFound')
self.assertRaises(
exception.NotFound,
self.rpcapi.ignore_error_named,
ex,
'NotSupported')
self.assertRaises(
exception.NotFound,
self.rpcapi.ignore_error_named,
exr,
'NotSupported')
def _test_engine_api(self, method, rpc_method, **kwargs):
ctxt = utils.dummy_context()
expected_retval = 'foo' if method == 'call' else None
kwargs.pop('version', None)
if 'expected_message' in kwargs:
expected_message = kwargs['expected_message']
del kwargs['expected_message']
else:
expected_message = self.rpcapi.make_msg(method, **kwargs)
cast_and_call = ['delete_stack']
if rpc_method == 'call' and method in cast_and_call:
kwargs['cast'] = False
with mock.patch.object(self.rpcapi, rpc_method) as mock_rpc_method:
mock_rpc_method.return_value = expected_retval
retval = getattr(self.rpcapi, method)(ctxt, **kwargs)
self.assertEqual(expected_retval, retval)
expected_args = [ctxt, expected_message, mock.ANY]
actual_args, _ = mock_rpc_method.call_args
for expected_arg, actual_arg in zip(expected_args,
actual_args):
self.assertEqual(expected_arg, actual_arg)
def test_authenticated_to_backend(self):
self._test_engine_api('authenticated_to_backend', 'call')
def test_list_stacks(self):
default_args = {
'limit': mock.ANY,
'sort_keys': mock.ANY,
'marker': mock.ANY,
'sort_dir': mock.ANY,
'filters': mock.ANY,
'tenant_safe': mock.ANY,
'show_deleted': mock.ANY,
'show_nested': mock.ANY,
'show_hidden': mock.ANY,
'tags': mock.ANY,
'tags_any': mock.ANY,
'not_tags': mock.ANY,
'not_tags_any': mock.ANY,
}
self._test_engine_api('list_stacks', 'call', **default_args)
def test_count_stacks(self):
default_args = {
'filters': mock.ANY,
'tenant_safe': mock.ANY,
'show_deleted': mock.ANY,
'show_nested': mock.ANY,
'show_hidden': mock.ANY,
'tags': mock.ANY,
'tags_any': mock.ANY,
'not_tags': mock.ANY,
'not_tags_any': mock.ANY,
}
self._test_engine_api('count_stacks', 'call', **default_args)
def test_identify_stack(self):
self._test_engine_api('identify_stack', 'call',
stack_name='wordpress')
def test_show_stack(self):
self._test_engine_api('show_stack', 'call', stack_identity='wordpress',
resolve_outputs=True)
def test_preview_stack(self):
self._test_engine_api('preview_stack', 'call', stack_name='wordpress',
template={u'Foo': u'bar'},
params={u'InstanceType': u'm1.xlarge'},
files={u'a_file': u'the contents'},
environment_files=['foo.yaml'],
args={'timeout_mins': u'30'})
def test_create_stack(self):
kwargs = dict(stack_name='wordpress',
template={u'Foo': u'bar'},
params={u'InstanceType': u'm1.xlarge'},
files={u'a_file': u'the contents'},
environment_files=['foo.yaml'],
args={'timeout_mins': u'30'})
call_kwargs = copy.deepcopy(kwargs)
call_kwargs['owner_id'] = None
call_kwargs['nested_depth'] = 0
call_kwargs['user_creds_id'] = None
call_kwargs['stack_user_project_id'] = None
call_kwargs['parent_resource_name'] = None
expected_message = self.rpcapi.make_msg('create_stack', **call_kwargs)
kwargs['expected_message'] = expected_message
self._test_engine_api('create_stack', 'call', **kwargs)
def test_update_stack(self):
self._test_engine_api('update_stack', 'call',
stack_identity=self.identity,
template={u'Foo': u'bar'},
params={u'InstanceType': u'm1.xlarge'},
files={},
environment_files=['foo.yaml'],
args=mock.ANY)
def test_preview_update_stack(self):
self._test_engine_api('preview_update_stack', 'call',
stack_identity=self.identity,
template={u'Foo': u'bar'},
params={u'InstanceType': u'm1.xlarge'},
files={},
environment_files=['foo.yaml'],
args=mock.ANY)
def test_get_template(self):
self._test_engine_api('get_template', 'call',
stack_identity=self.identity)
def test_delete_stack_cast(self):
self._test_engine_api('delete_stack', 'cast',
stack_identity=self.identity)
def test_delete_stack_call(self):
self._test_engine_api('delete_stack', 'call',
stack_identity=self.identity)
def test_validate_template(self):
self._test_engine_api('validate_template', 'call',
template={u'Foo': u'bar'},
params={u'Egg': u'spam'},
files=None,
environment_files=['foo.yaml'],
ignorable_errors=None,
show_nested=False,
version='1.24')
def test_list_resource_types(self):
self._test_engine_api('list_resource_types',
'call',
support_status=None,
type_name=None,
heat_version=None,
version='1.16')
def test_resource_schema(self):
self._test_engine_api('resource_schema', 'call', type_name="TYPE")
def test_generate_template(self):
self._test_engine_api('generate_template', 'call',
type_name="TYPE", template_type='cfn')
def test_list_events(self):
kwargs = {'stack_identity': self.identity,
'limit': None,
'marker': None,
'sort_keys': None,
'sort_dir': None,
'filters': None}
self._test_engine_api('list_events', 'call', **kwargs)
def test_describe_stack_resource(self):
self._test_engine_api('describe_stack_resource', 'call',
stack_identity=self.identity,
resource_name='LogicalResourceId',
with_attr=None)
def test_find_physical_resource(self):
self._test_engine_api('find_physical_resource', 'call',
physical_resource_id=u'404d-a85b-5315293e67de')
def test_describe_stack_resources(self):
self._test_engine_api('describe_stack_resources', 'call',
stack_identity=self.identity,
resource_name=u'WikiDatabase')
def test_list_stack_resources(self):
self._test_engine_api('list_stack_resources', 'call',
stack_identity=self.identity,
nested_depth=0,
with_detail=False,
filters=None,
version=1.25)
def test_stack_suspend(self):
self._test_engine_api('stack_suspend', 'call',
stack_identity=self.identity)
def test_stack_resume(self):
self._test_engine_api('stack_resume', 'call',
stack_identity=self.identity)
def test_stack_cancel_update(self):
self._test_engine_api('stack_cancel_update', 'call',
stack_identity=self.identity,
cancel_with_rollback=False,
version='1.14')
def test_resource_signal(self):
self._test_engine_api('resource_signal', 'call',
stack_identity=self.identity,
resource_name='LogicalResourceId',
details={u'wordpress': []},
sync_call=True)
def test_create_watch_data(self):
self._test_engine_api('create_watch_data', 'call',
watch_name='watch1',
stats_data={})
def test_show_watch(self):
self._test_engine_api('show_watch', 'call',
watch_name='watch1')
def test_show_watch_metric(self):
self._test_engine_api('show_watch_metric', 'call',
metric_namespace=None, metric_name=None)
def test_set_watch_state(self):
self._test_engine_api('set_watch_state', 'call',
watch_name='watch1', state="xyz")
def test_list_software_configs(self):
self._test_engine_api('list_software_configs', 'call',
limit=mock.ANY, marker=mock.ANY,
tenant_safe=mock.ANY)
def test_show_software_config(self):
self._test_engine_api('show_software_config', 'call',
config_id='cda89008-6ea6-4057-b83d-ccde8f0b48c9')
def test_create_software_config(self):
self._test_engine_api('create_software_config', 'call',
group='Heat::Shell',
name='config_mysql',
config='#!/bin/bash',
inputs=[],
outputs=[],
options={})
def test_delete_software_config(self):
self._test_engine_api('delete_software_config', 'call',
config_id='cda89008-6ea6-4057-b83d-ccde8f0b48c9')
def test_list_software_deployments(self):
self._test_engine_api('list_software_deployments', 'call',
server_id=None)
self._test_engine_api('list_software_deployments', 'call',
server_id='9dc13236-d342-451f-a885-1c82420ba5ed')
def test_show_software_deployment(self):
deployment_id = '86729f02-4648-44d8-af44-d0ec65b6abc9'
self._test_engine_api('show_software_deployment', 'call',
deployment_id=deployment_id)
def test_create_software_deployment(self):
self._test_engine_api(
'create_software_deployment', 'call',
server_id='9f1f0e00-05d2-4ca5-8602-95021f19c9d0',
config_id='48e8ade1-9196-42d5-89a2-f709fde42632',
deployment_id='86729f02-4648-44d8-af44-d0ec65b6abc9',
stack_user_project_id='65728b74-cfe7-4f17-9c15-11d4f686e591',
input_values={},
action='INIT',
status='COMPLETE',
status_reason=None)
def test_update_software_deployment(self):
deployment_id = '86729f02-4648-44d8-af44-d0ec65b6abc9'
self._test_engine_api('update_software_deployment', 'call',
deployment_id=deployment_id,
config_id='48e8ade1-9196-42d5-89a2-f709fde42632',
input_values={},
output_values={},
action='DEPLOYED',
status='COMPLETE',
status_reason=None,
updated_at=None)
def test_delete_software_deployment(self):
deployment_id = '86729f02-4648-44d8-af44-d0ec65b6abc9'
self._test_engine_api('delete_software_deployment', 'call',
deployment_id=deployment_id)
def test_show_snapshot(self):
snapshot_id = '86729f02-4648-44d8-af44-d0ec65b6abc9'
self._test_engine_api('show_snapshot', 'call',
stack_identity=self.identity,
snapshot_id=snapshot_id)
def test_stack_snapshot(self):
self._test_engine_api(
'stack_snapshot', 'call', stack_identity=self.identity,
name='snap1')
def test_delete_snapshot(self):
snapshot_id = '86729f02-4648-44d8-af44-d0ec65b6abc9'
self._test_engine_api('delete_snapshot', 'call',
stack_identity=self.identity,
snapshot_id=snapshot_id)
def test_list_services(self):
self._test_engine_api('list_services', 'call', version='1.4')
def test_stack_list_outputs(self):
self._test_engine_api(
'list_outputs', 'call', stack_identity=self.identity,
version='1.19'
)
def test_stack_show_output(self):
self._test_engine_api(
'show_output', 'call', stack_identity=self.identity,
output_key='test', version='1.19')
def test_export_stack(self):
self._test_engine_api('export_stack',
'call',
stack_identity=self.identity,
version='1.22')
def test_resource_mark_unhealthy(self):
self._test_engine_api('resource_mark_unhealthy', 'call',
stack_identity=self.identity,
resource_name='LogicalResourceId',
mark_unhealthy=True,
resource_status_reason="Any reason",
version='1.26')
| apache-2.0 | -411,708,305,636,292,000 | 39.833747 | 79 | 0.521451 | false |
TravisCouture/GtManipulator | gtmanipulator/gtmanipulator_tests/test.py | 1 | 2058 | import unittest
import os
import pandas as pd
import sys
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
if FILE_DIR + "/../" not in sys.path:
sys.path.append(FILE_DIR + "/../")
import gtmanipulator as gt
class GtClassTestCase(unittest.TestCase):
def test_summarize(self):
test_data = gt.GtManipulator(FILE_DIR + "/../sample_data/sample_data.csv")
correct_data = pd.read_csv(FILE_DIR + "/../sample_data/sample_data_summarized.csv",
header=0, index_col=0, delimiter=",")
pd.util.testing.assert_frame_equal(correct_data, test_data.summarize())
def test_alleles(self):
test_data = gt.GtManipulator(FILE_DIR + "/../sample_data/sample_data.csv")
correct_data = pd.read_csv(FILE_DIR + "/../sample_data/sample_data_alleles.csv",
header=0, index_col=0, delimiter=",")
test_data.summarize()
pd.util.testing.assert_frame_equal(correct_data, test_data.calc_frequencies())
def test_alleles_missing(self):
test_data = gt.GtManipulator(FILE_DIR + "/../sample_data/sample_data.csv")
correct_data = pd.read_csv(FILE_DIR + "/../sample_data/sample_data_alleles.csv",
header=0, index_col=0, delimiter=",")
test_data.summarize()
try:
pd.util.testing.assert_series_equal(correct_data["MissingCount"],
test_data.calc_frequencies({"--"})["MissingCount"])
except AssertionError:
pass
def test_morphisms(self):
test_data = gt.GtManipulator(FILE_DIR + "/../sample_data/sample_data.csv")
correct_data = pd.read_csv(FILE_DIR + "/../sample_data/sample_data_morphisms.csv",
header=0, index_col=0, delimiter=",")
test_data.summarize()
test_data.calc_frequencies()
pd.util.testing.assert_frame_equal(correct_data, test_data.determine_morphisms())
if __name__ == "__main__":
unittest.main()
| mit | -7,867,534,457,927,676,000 | 42.787234 | 99 | 0.587949 | false |
dombrno/PG | Tests/test_dos.py | 1 | 1817 | import numpy as np
import sys
from os.path import expanduser
HOME = expanduser("~")
if "storage" in HOME:
HOME = "/storage/home/geffroy"
sys.path.append(HOME + "/Code/PG/Source")
from phase_fluctuations import SWaveModel
from MCMC import MCMCDriver
# pylint: disable=E1101
T_CST = 0.25
TARGET_SNAPSHOTS = 6
MC_INTERVALS = 100
TEMPERATURE = 500
THERMALIZATION_STEPS = 10000
ROOT_PHASE_SEED = 123456789
ROOT_MC_SEED = 23456
DOS_FILENAMES = ["dos_swave.txt"]
SAVE_FORMAT = '%1.4e'
TMP_NAME = 'CHECK.txt'
BCS_PARAMS = {"width": 24, "chem_potential": 0.0,
"hopping_constant": T_CST, "J_constant": 0.112 * T_CST,
"g_constant": 1.0, "delta": 1.0 * T_CST, "use_assaad": True,
"uniform_phase": False, "temperature": TEMPERATURE,
"seed": ROOT_PHASE_SEED}
MC_PARAMS = {"seed": ROOT_MC_SEED, "intervals": MC_INTERVALS,
"target_snapshots": TARGET_SNAPSHOTS,
"observable_list": ["DOS"]}
def create_dos_files():
"""Create the reference test files"""
my_model = SWaveModel(BCS_PARAMS)
my_driver = MCMCDriver(my_model, MC_PARAMS)
my_driver.thermalize(THERMALIZATION_STEPS)
my_driver.execute()
dos_values = my_driver.result.observable_results["DOS"]['DOS_values']
np.savetxt(DOS_FILENAMES[0], dos_values, fmt=SAVE_FORMAT)
def test_swave_dos():
"""run the actual tests with nosetest"""
my_model = SWaveModel(BCS_PARAMS)
my_driver = MCMCDriver(my_model, MC_PARAMS)
my_driver.thermalize(THERMALIZATION_STEPS)
my_driver.execute()
dos_values = my_driver.result.observable_results["DOS"]['DOS_values']
np.savetxt(TMP_NAME, dos_values, fmt=SAVE_FORMAT)
assert np.all(np.loadtxt(TMP_NAME) ==
np.loadtxt(DOS_FILENAMES[0]))
if __name__ == "__main__":
create_dos_files()
| bsd-2-clause | 2,669,809,737,027,920,000 | 32.036364 | 74 | 0.653825 | false |
gmierz/pupil-lib | pupillib/core/utilities/config_store.py | 1 | 1531 | '''
(*)~---------------------------------------------------------------------------
This file is part of Pupil-lib.
Pupil-lib is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Pupil-lib is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Pupil-lib. If not, see <https://www.gnu.org/licenses/>.
Copyright (C) 2018 Gregory W. Mierzwinski
---------------------------------------------------------------------------~(*)
'''
from frozendict import frozendict
class ConfigStore:
class innerConfigStore:
def __init__(self, config):
self.frozen_config = frozendict(config)
instance = None
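    # A possible usage sketch, with made-up keys, assuming the store is
    # initialised once and read elsewhere:
    #
    #   ConfigStore.set_instance({'epochs': 10})
    #   cfg = ConfigStore.get_instance()
    #   cfg.frozen_config['epochs']   # -> 10 (frozendict, so read-only)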
@staticmethod
def get_instance(a_dict=None):
if a_dict is None:
a_dict = dict()
        if not ConfigStore.instance:
            ConfigStore.instance = ConfigStore.innerConfigStore(a_dict)
        return ConfigStore.instance
@staticmethod
def set_instance(a_dict=None):
if a_dict is None:
a_dict = dict()
if not ConfigStore.instance:
ConfigStore.instance = ConfigStore.innerConfigStore(a_dict) | gpl-3.0 | -4,799,868,825,043,542,000 | 32.304348 | 79 | 0.624428 | false |
Ignalion/bookshelf | app/endpoints/register.py | 1 | 1115 | """
Registering user into our system. Now we'll be watching you.
"""
from flask import render_template, request, redirect, url_for, views
from flask_login import current_user
from app.forms import RegisterForm
from app.lib.abstract import UserAbstraction
class RegisterView(views.View):
""" Register user with data, provided by himself on the register page """
methods = ('GET', 'POST',)
def dispatch_request(self):
if current_user.is_authenticated:
return redirect(url_for('index'))
form = RegisterForm()
if request.method == 'POST':
if form.validate_on_submit():
user_mgr = UserAbstraction()
password = user_mgr.set_password(form.password.data)
form.password.data = password
user_mgr.create(**form.data)
return redirect(url_for('index'))
return render_template('register.html',
form=form,
page_title='Register',
user=current_user,
)
| gpl-2.0 | -3,281,259,100,706,968,600 | 31.794118 | 77 | 0.569507 | false |
renefs87/project-payment-manager | app/djmoney/models/managers.py | 1 | 2679 | try:
from django.utils.encoding import smart_unicode
except ImportError:
# Python 3
from django.utils.encoding import smart_text as smart_unicode
from djmoney.utils import get_currency_field_name
def _expand_money_params(kwargs):
from moneyed import Money
try:
from django.db.models.constants import LOOKUP_SEP
except ImportError:
# Django < 1.5
LOOKUP_SEP = '__'
from django.db.models.sql.constants import QUERY_TERMS
to_append = {}
for name, value in kwargs.items():
if isinstance(value, Money):
# Get rid of __lt, __gt etc for the currency lookup
path = name.split(LOOKUP_SEP)
if path[-1] in QUERY_TERMS:
clean_name = LOOKUP_SEP.join(path[:-1])
else:
clean_name = name
to_append[name] = value.amount
to_append[get_currency_field_name(clean_name)] = smart_unicode(
value.currency)
kwargs.update(to_append)
return kwargs
def understands_money(func):
"""
Used to wrap a queryset method with logic to expand
a query from something like:
mymodel.objects.filter(money=Money(100,"USD"))
To something equivalent to:
    mymodel.objects.filter(money=Decimal("100.0"), money_currency="USD")
"""
def decorator(*args, **kwargs):
kwargs = _expand_money_params(kwargs)
return func(*args, **kwargs)
return decorator
RELEVANT_QUERYSET_METHODS = ['dates', 'distinct', 'extra', 'get',
'get_or_create', 'filter', 'complex_filter',
'exclude', 'in_bulk', 'iterator', 'latest',
'order_by', 'select_related', 'values']
def add_money_comprehension_to_queryset(qs):
# Decorate each relevant method with understand_money in the queryset given
list(map(lambda attr: setattr(qs, attr, understands_money(getattr(qs, attr))),
RELEVANT_QUERYSET_METHODS))
return qs
def money_manager(manager):
"""
    Wraps a model manager's get_query_set method so that each queryset it
    returns is able to work on money fields.
    We use this instead of a real model manager, in order to allow users of
    django-money to use other special managers while still doing money queries.
"""
old_get_query_set = manager.get_query_set
def get_query_set(*args, **kwargs):
return add_money_comprehension_to_queryset(old_get_query_set(*args, **kwargs))
manager.get_query_set = get_query_set
if hasattr(manager, 'get_queryset'):
# Django 1.6
manager.get_queryset = get_query_set
return manager
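# A hypothetical usage sketch (the model and field names are made up, and
# MoneyField is assumed to be importable from djmoney's model fields):
#
#   from django.db import models
#   from moneyed import Money
#
#   class Invoice(models.Model):
#       total = MoneyField(max_digits=10, decimal_places=2)
#       objects = money_manager(models.Manager())
#
#   Invoice.objects.filter(total=Money(100, "USD"))
#   # is expanded to roughly:
#   # Invoice.objects.filter(total=Decimal("100"), total_currency="USD")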
| gpl-3.0 | -2,300,557,047,475,128,600 | 29.793103 | 91 | 0.630086 | false |
mirkobronzi/finance-analyzer | test/test_data_analyzer.py | 1 | 4608 | '''tests for data_analyser.py'''
from datetime import date
import unittest
from lib.data_analyzer import MonthData
from lib.data_analyzer import DataContainer
from lib.entries import Entries, Entry
class TestDataAnalyzer(unittest.TestCase):
def test_month_data__simple(self):
month_data = MonthData('name',
Entries(Entry(date.today(), 'entry1', 1, 2)))
self.assertEqual(month_data.month_name,'name')
self.assertEqual(month_data.money_out, 1)
self.assertEqual(month_data.money_in, 2)
def test_month_data__check_sum(self):
month_data = MonthData('name',
Entries(Entry(date.today(), 'entry1', 1, 10),
Entry(date.today(), 'entry2', 2, 20)))
self.assertEqual(month_data.money_out, 3)
self.assertEqual(month_data.money_in, 30)
def test_analyze_data_by_month_simple(self):
        curr_date = date(2014, 10, 3)
exp = {'cat': {(2014, 10): MonthData((2014, 10),
Entries(Entry(curr_date, 'entry1', 1, 2)))}}
got = DataContainer.analyze_data_by_month(
{'cat': Entries(Entry(curr_date, 'entry1', 1, 2))})
self.assertEqual(exp, got[0])
def test_analyze_data_by_month_more_entries(self):
        curr_date = date(2014, 10, 3)
exp = {'cat': {(2014, 10): MonthData((2014, 10),
Entries(Entry(curr_date, 'entry1', 1, 2),
Entry(curr_date, 'entry2', 10, 20)))}}
got = DataContainer.analyze_data_by_month(
{'cat': Entries(Entry(curr_date, 'entry1', 1, 2),
Entry(curr_date, 'entry2', 10, 20))})
self.assertEqual(got[0], exp)
def test_collapse_data_by_month_simple(self):
        day1 = date(2014, 10, 3)
        day2 = date(2014, 11, 5)
entries = {'cat1': {(2014, 10): MonthData((2014, 10),
Entries(Entry(day1, 'entry1', 1, 2))),
(2014, 11): MonthData((2014, 11),
Entries(Entry(day2, 'entry2', 100, 200)))},
'cat2': {(2014, 10): MonthData((2014, 10),
Entries(Entry(day1, 'entry3', 10, 20)))}}
sut = DataContainer(['cat1', 'cat2'], entries, [(2014, 10), (2014, 11)])
self.assertEqual([(11, 22), (100, 200)], sut.collapsed_data_by_month())
def test_data_container_get_year(self):
        day1 = date(2014, 10, 3)
        day2 = date(2015, 11, 5)
entries = {'cat1': {(2014, 10): MonthData((2014, 10),
Entries(Entry(day1, 'entry1', 1, 2))),
(2015, 11): MonthData((2015, 11),
Entries(Entry(day2, 'entry2', 100, 200)))},
'cat2': {(2014, 11): MonthData((2014, 10),
Entries(Entry(day1, 'entry3', 10, 20)))}}
sut = DataContainer(['cat1', 'cat2'], entries, [(2014, 10), (2015, 11)])
self.assertEqual({2014, 2015}, sut.get_years())
def test_organize_category_simple(self):
        entries = Entries(Entry(date(2014, 10, 3), 'entry', 1, 2))
retrieved = DataContainer._organize_categories(entries, {'entry': 'cat'})
self.assertEqual({'cat': entries}, retrieved)
def test_organize_category_more_entries(self):
expected =\
{'firstcat' : Entries(Entry(date(2011, 11, 11), 'firstentry', 1, 1)),
'secondcat': Entries(Entry(date(2012, 12, 12), 'secondentry1', 2, 2),
Entry(date(2010, 10, 10), 'secondentry2', 0, 0))}
entries = Entries(Entry(date(2011, 11, 11), 'firstentry', 1, 1),
Entry(date(2012, 12, 12), 'secondentry1', 2, 2),
Entry(date(2010, 10, 10), 'secondentry2', 0, 0))
retrieved = DataContainer._organize_categories(entries,
{'firstentry': 'firstcat',
'secondentry': 'secondcat'})
self.assertEqual(expected, retrieved)
def test_organize_category_custom_category(self):
entries = Entries(
            Entry(date(2014, 10, 3), 'entry', 1, 2, {'CAT': 'custom'}))
retrieved = DataContainer._organize_categories(entries, {'entry': 'cat'})
self.assertEqual({'custom': entries}, retrieved)
| gpl-3.0 | -191,709,844,971,507,300 | 48.548387 | 87 | 0.506293 | false |
pdellaert/vspk-examples | python/vm_policies_overview.py | 1 | 21818 | # -*- coding: utf-8 -*-
"""
vm_policies_overview.py is a tool to display the policies that are applied
to one or more VMs.
The script tries to be intelligent and converts the Ether types and protocol
numbers to human-readable text.
The script does not touch null or None values, because there is a difference
between None and '*', for instance.
--- Author ---
Philippe Dellaert <[email protected]>
--- Version history ---
2016-05-18 - 0.1.0 - First beta
2016-05-18 - 0.2.0 - Fix unused variable
2016-05-18 - 0.3.0 - Check location and network type and if a fixer exists
2016-05-18 - 0.4.0 - Order of table fields fix
2016-05-18 - 0.5.0 - Fix for fetching data and log output
2016-05-18 - 0.5.1 - Fix for fetching fetcher
2016-05-18 - 0.5.2 - Fix for fetching object instead of array
2016-05-18 - 0.6.0 - Output cleanup
2016-05-18 - 0.6.1 - Missing coma fix
2016-05-18 - 0.6.2 - Fixing typo and applying to all sections with a function
2016-05-18 - 0.6.3 - Fixing second typo
2016-05-18 - 0.6.4 - Fixing output handling
2016-05-18 - 1.0.0 - First stable release
2020-07-06 - 1.1.0 - Migrate to v6 API
--- Usage ---
run 'vm_policies_overview.py -h' for an overview
"""
from __future__ import print_function
from builtins import str
import argparse
import getpass
import json
import logging
from prettytable import PrettyTable
from vspk import v6 as vsdk
ether_types = {
'0x0800': 'IPv4',
'0x0806': 'ARP',
'0x0842': 'Wake-on-LAN',
'0x22F3': 'IETF TRILL',
'0x6003': 'DECnet Phase IV',
'0x8035': 'RARP',
'0x809B': 'AppleTalk ',
'0x80F3': 'AARP',
'0x8100': '802.1Q and 802.1aq',
'0x8137': 'IPX',
'0x8204': 'QNX Qnet',
'0x86DD': 'IPv6',
'0x8808': 'Ethernet flow control',
'0x8819': 'CobraNet',
'0x8847': 'MPLS unicast',
'0x8848': 'MPLS multicast',
'0x8863': 'PPPoE Discovery Stage',
'0x8864': 'PPPoE Session Stage',
'0x8870': 'Jumbo Frames (proposed)',
'0x887B': 'HomePlug 1.0 MME',
'0x888E': 'EAP over LAN (IEEE 802.1X)',
'0x8892': 'PROFINET Protocol',
'0x889A': 'HyperSCSI (SCSI over Ethernet)',
'0x88A2': 'ATA over Ethernet',
'0x88A4': 'EtherCAT Protocol',
'0x88A8': 'Provider Bridging (IEEE 802.1ad) ',
'0x88AB': 'Ethernet Powerlink',
'0x88CC': 'LLDP',
'0x88CD': 'SERCOS III',
'0x88E1': 'HomePlug AV MME',
'0x88E3': 'Media Redundancy Protocol (IEC62439-2)',
'0x88E5': 'MAC security (IEEE 802.1AE)',
'0x88E7': 'Provider Backbone Bridges (PBB) (IEEE 802.1ah)',
'0x88F7': 'Precision Time Protocol (PTP) over Ethernet (IEEE 1588)',
'0x8902': 'IEEE 802.1ag Connectivity Fault Management (CFM) Protocol ',
'0x8906': 'FCoE',
'0x8914': 'FCoE Initialization Protocol',
'0x8915': 'RoCE',
'0x891D': 'TTE',
'0x892F': 'HSR',
'0x9000': 'Ethernet Configuration Testing Protocol'
}
protocols = {
'0': 'HOPOPT',
'1': 'ICMP',
'2': 'IGMP',
'3': 'GGP',
'4': 'IPv4',
'5': 'ST',
'6': 'TCP',
'7': 'CBT',
'8': 'EGP',
'9': 'IGP',
'10': 'BBN-RCC-MON',
'11': 'NVP-II',
'12': 'PUP',
'13': 'ARGUS',
'14': 'EMCON',
'15': 'XNET',
'16': 'CHAOS',
'17': 'UDP',
'18': 'MUX',
'19': 'DCN-MEAS',
'20': 'HMP',
'21': 'PRM',
'22': 'XNS-IDP',
'23': 'TRUNK-1',
'24': 'TRUNK-2',
'25': 'LEAF-1',
'26': 'LEAF-2',
'27': 'RDP',
'28': 'IRTP',
'29': 'ISO-TP4',
'30': 'NETBLT',
'31': 'MFE-NSP',
'32': 'MERIT-INP',
'33': 'DCCP',
'34': '3PC',
'35': 'IDPR',
'36': 'XTP',
'37': 'DDP',
'38': 'IDPR-CMTP',
'39': 'TP++',
'40': 'IL',
'41': 'IPv6',
'42': 'SDRP',
'43': 'IPv6-Route',
'44': 'IPv6-Frag',
'45': 'IDRP',
'46': 'RSVP',
'47': 'GRE',
'48': 'DSR',
'49': 'BNA',
'50': 'ESP',
'51': 'AH',
'52': 'I-NLSP',
'53': 'SWIPE',
'54': 'NARP',
'55': 'MOBILE',
'56': 'TLSP',
'57': 'SKIP',
'58': 'IPv6-ICMP',
'59': 'IPv6-NoNxt',
'60': 'IPv6-Opts',
'62': 'CFTP',
'64': 'SAT-EXPAK',
'65': 'KRYPTOLAN',
'66': 'RVD',
'67': 'IPPC',
'69': 'SAT-MON',
'70': 'VISA',
'71': 'IPCV',
'72': 'CPNX',
'73': 'CPHB',
'74': 'WSN',
'75': 'PVP',
'76': 'BR-SAT-MON',
'77': 'SUN-ND',
'78': 'WB-MON',
'79': 'WB-EXPAK',
'80': 'ISO-IP',
'81': 'VMTP',
'82': 'SECURE-VMTP',
'83': 'VINES',
'84': 'IPTM',
'85': 'NSFNET-IGP',
'86': 'DGP',
'87': 'TCF',
'88': 'EIGRP',
'89': 'OSPFIGP',
'90': 'Sprite-RPC',
'91': 'LARP',
'92': 'MTP',
'93': 'AX.25',
'94': 'IPIP',
'95': 'MICP',
'96': 'SCC-SP',
'97': 'ETHERIP',
'98': 'ENCAP',
'100': 'GMTP',
'101': 'IFMP',
'102': 'PNNI',
'103': 'PIM',
'104': 'ARIS',
'105': 'SCPS',
'106': 'QNX',
'107': 'A/N',
'108': 'IPComp',
'109': 'SNP',
'110': 'Compaq-Peer',
'111': 'IPX-in-IP',
'112': 'VRRP',
'113': 'PGM',
'115': 'L2TP',
'116': 'DDX',
'117': 'IATP',
'118': 'STP',
'119': 'SRP',
'120': 'UTI',
'121': 'SMP',
'122': 'SM',
'123': 'PTP',
'124': 'ISIS over IPv4',
'125': 'FIRE',
'126': 'CRTP',
'127': 'CRUDP',
'128': 'SSCOPMCE',
'129': 'IPLT',
'130': 'SPS',
'131': 'PIPE',
'132': 'SCTP',
'133': 'FC',
'134': 'RSVP-E2E-IGNORE',
'135': 'Mobility Header',
'136': 'UDPLite',
'137': 'MPLS-in-IP',
'138': 'manet',
'139': 'HIP',
'140': 'Shim6',
'141': 'WESP',
'142': 'ROHC',
'255': 'Reserved'
}
configuration = {}
logger = None
output_parser = None
def get_args():
"""
Supports the command-line arguments listed below.
"""
    parser = argparse.ArgumentParser(description="Tool to display the ACL and redirect policies applied to one or more VMs.")
parser.add_argument('-d', '--debug', required=False, help='Enable debug output', dest='debug', action='store_true')
parser.add_argument('-j', '--json', required=False, help='Print as JSON, not as a table', dest='json_output', action='store_true')
parser.add_argument('-l', '--log-file', required=False, help='File to log to (default = stdout)', dest='logfile', type=str)
parser.add_argument('-E', '--nuage-enterprise', required=True, help='The enterprise with which to connect to the Nuage VSD/SDK host', dest='nuage_enterprise', type=str)
parser.add_argument('-H', '--nuage-host', required=True, help='The Nuage VSD/SDK endpoint to connect to', dest='nuage_host', type=str)
parser.add_argument('-P', '--nuage-port', required=False, help='The Nuage VSD/SDK server port to connect to (default = 8443)', dest='nuage_port', type=int, default=8443)
parser.add_argument('-p', '--nuage-password', required=False, help='The password with which to connect to the Nuage VSD/SDK host. If not specified, the user is prompted at runtime for a password', dest='nuage_password', type=str)
parser.add_argument('-u', '--nuage-user', required=True, help='The username with which to connect to the Nuage VSD/SDK host', dest='nuage_username', type=str)
parser.add_argument('-S', '--disable-SSL-certificate-verification', required=False, help='Disable SSL certificate verification on connect (deprecated)', dest='nosslcheck', action='store_true')
parser.add_argument('-v', '--verbose', required=False, help='Enable verbose output', dest='verbose', action='store_true')
parser.add_argument('-V', '--vm', required=False, help='The VM for which to return the applied policies (can be specified multiple times for multiple VMs), if none is specified, information for all VMs will be returned', dest='vm_names', type=str, action='append')
args = parser.parse_args()
return args
def handle_output(output):
"""
    Translate raw entry values into human-readable form and append the row to the output collector.
"""
global output_parser
if output['Ether type'] in list(ether_types.keys()):
output['Ether type'] = ether_types[output['Ether type']]
if output['Protocol'] in list(protocols.keys()):
output['Protocol'] = protocols[output['Protocol']]
if output['Source type'] == 'ANY':
output['Source name'] = '*'
if output['Destination type'] == 'ANY':
        output['Destination name'] = '*'
if not configuration['json_output']:
# Cleanup None values
for key in list(output.keys()):
if output[key] is None:
output[key] = ''
logger.debug('Saving output to output object')
if configuration['json_output']:
output_parser.append(output)
else:
output_parser.add_row([
output['VM Name'],
output['Interface MAC'],
output['ACL type'],
output['Ether type'],
output['Protocol'],
output['Source type'],
output['Source name'],
output['Destination type'],
output['Destination name'],
output['Source port'],
output['Destination port'],
output['DSCP'],
output['Stateful'],
output['Action']
])
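# For illustration (made-up entry): an output dict carrying
# {'Ether type': '0x0800', 'Protocol': '6', ...} is rendered with
# Ether type 'IPv4' and Protocol 'TCP' before being appended to the JSON
# list or added as a row of the pretty-printed table.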
def main():
"""
    Main function: gathers and displays the policies applied to the selected VMs.
"""
global configuration, logger, output_parser
# Handling arguments
args = get_args()
configuration = {}
configuration['debug'] = args.debug
configuration['json_output'] = args.json_output
configuration['log_file'] = None
if args.logfile:
configuration['log_file'] = args.logfile
configuration['nuage_enterprise'] = args.nuage_enterprise
configuration['nuage_host'] = args.nuage_host
configuration['nuage_port'] = args.nuage_port
configuration['nuage_password'] = None
if args.nuage_password:
configuration['nuage_password'] = args.nuage_password
configuration['nuage_username'] = args.nuage_username
# configuration['nosslcheck'] = args.nosslcheck
configuration['verbose'] = args.verbose
configuration['vm_names'] = []
if args.vm_names:
configuration['vm_names'] = args.vm_names
# Logging settings
if configuration['debug']:
log_level = logging.DEBUG
elif configuration['verbose']:
log_level = logging.INFO
else:
log_level = logging.WARNING
logging.basicConfig(filename=configuration['log_file'], format='%(asctime)s %(levelname)s %(message)s', level=log_level)
logger = logging.getLogger(__name__)
# Getting user password for Nuage connection
if configuration['nuage_password'] is None:
logger.debug('No command line Nuage password received, requesting Nuage password from user')
configuration['nuage_password'] = getpass.getpass(prompt='Enter password for Nuage host %s for user %s: ' % (configuration['nuage_host'], configuration['nuage_username']))
try:
# Connecting to Nuage
logger.debug('Connecting to Nuage server %s:%s with username %s' % (configuration['nuage_host'], configuration['nuage_port'], configuration['nuage_username']))
nc = vsdk.NUVSDSession(username=configuration['nuage_username'], password=configuration['nuage_password'], enterprise=configuration['nuage_enterprise'], api_url="https://%s:%s" % (configuration['nuage_host'], configuration['nuage_port']))
nc.start()
except Exception as e:
logger.error('Could not connect to Nuage host %s with user %s and specified password' % (configuration['nuage_host'], configuration['nuage_username']))
logger.critical('Caught exception: %s' % str(e))
return 1
# Setting output correctly
output_fields = [
'VM Name',
'Interface MAC',
'ACL type',
'Ether type',
'Protocol',
'Source type',
'Source name',
'Destination type',
'Destination name',
'Source port',
'Destination port',
'DSCP',
'Stateful',
'Action'
]
# Gathering VMs
vms = []
if configuration['vm_names']:
for vm_name in configuration['vm_names']:
logger.debug('Getting VMs matching the name %s' % vm_name)
entities = nc.user.vms.get(filter='name == "%s"' % vm_name)
vms.extend(entities)
else:
logger.debug('Getting all VMs')
vms = nc.user.vms.get()
# Gathering VM Interfaces
vm_interfaces = []
logger.debug('Getting all VM interfaces for the selected VMs')
for vm in vms:
vm_interfaces.extend(vm.vm_interfaces.get())
# Verifying if there are enities
if len(vm_interfaces) == 0:
logger.critical('No matching vms found')
return 1
# Starting output
if configuration['json_output']:
logger.debug('JSON output enabled, not setting up an output table')
output_parser = []
else:
logger.debug('Setting up output table')
output_parser = PrettyTable(output_fields)
# Gathering ACL rules and handling them
for vm_interface in vm_interfaces:
logger.debug('Gathering VM interface policy decisions')
policy_decisions = vm_interface.policy_decisions.get_first()
ingress_acl_entries = policy_decisions.ingress_acls[0]['entries']
egress_acl_entries = policy_decisions.egress_acls[0]['entries']
forward_acl_entries = policy_decisions.ingress_adv_fwd[0]['entries']
logger.debug('Found %s ingress ACLs and %s egress ACLs' % (len(ingress_acl_entries), len(egress_acl_entries)))
logger.debug('Handling Ingress ACL entries')
for entry in ingress_acl_entries:
acl_rule = None
logger.debug('Using minimal information from the policy decision entry itself')
output = {
'VM Name': vm_interface.parent.name,
'Interface MAC': vm_interface.mac,
'ACL type': 'Ingress',
'Ether type': entry['etherType'],
'Protocol': entry['protocol'],
'Source type': 'VM',
'Source name': vm_interface.parent.name,
'Destination type': entry['destinationType'],
'Destination name': entry['destinationValue'],
'Source port': entry['sourcePort'],
'Destination port': entry['destinationPort'],
'DSCP': entry['DSCP'],
'Stateful': '',
'Action': entry['actionDetails']['actionType']
}
if entry['aclTemplateEntryId']:
logger.debug('Finding the actual Ingress ACL Template Entry to use its data')
# We are using this approach with the Stats ID as the aclTemplateEntryId points to the stats ID of an Ingress/Egress ACL Entry Template in the current version (bug report generated)
acl_rule = nc.user.ingress_acl_entry_templates.get_first(filter='statsID == "%s"' % entry['aclTemplateEntryId'])
if acl_rule:
logger.debug('Found a matching Ingress ACL Template Entry: %s' % acl_rule.description)
output['Ether type'] = acl_rule.ether_type
output['Protocol'] = acl_rule.protocol
output['Source type'] = acl_rule.location_type
if acl_rule.location_type and nc.user.fetcher_for_rest_name(acl_rule.location_type.lower()) is not None:
output['Source name'] = nc.user.fetcher_for_rest_name(acl_rule.location_type.lower()).get_first(filter='ID == "%s"' % acl_rule.location_id).name
output['Destination type'] = acl_rule.network_type
if acl_rule.network_type and nc.user.fetcher_for_rest_name(acl_rule.network_type.lower()) is not None:
output['Destination name'] = nc.user.fetcher_for_rest_name(acl_rule.network_type.lower()).get_first(filter='ID == "%s"' % acl_rule.network_id).name
output['Source port'] = acl_rule.source_port
output['Destination port'] = acl_rule.destination_port
output['DSCP'] = acl_rule.dscp
output['Stateful'] = acl_rule.stateful
output['Action'] = acl_rule.action
handle_output(output=output)
logger.debug('Handling Egress ACL entries')
for entry in egress_acl_entries:
acl_rule = None
logger.debug('Using minimal information from the policy decision entry itself')
output = {
'VM Name': vm_interface.parent.name,
'Interface MAC': vm_interface.mac,
'ACL type': 'Egress',
'Ether type': entry['etherType'],
'Protocol': entry['protocol'],
'Source type': 'VM',
'Source name': vm_interface.parent.name,
'Destination type': entry['destinationType'],
'Destination name': entry['destinationValue'],
'Source port': entry['sourcePort'],
'Destination port': entry['destinationPort'],
'DSCP': entry['DSCP'],
'Stateful': '',
'Action': entry['actionDetails']['actionType']
}
if entry['aclTemplateEntryId']:
logger.debug('Finding the actual Egress ACL Template Entry to use its data')
# We are using this approach with the Stats ID as the aclTemplateEntryId points to the stats ID of an Ingress/Egress ACL Entry Template in the current version (bug report generated)
acl_rule = nc.user.egress_acl_entry_templates.get_first(filter='statsID == "%s"' % entry['aclTemplateEntryId'])
if acl_rule:
logger.debug('Found a matching Egress ACL Template Entry: %s' % acl_rule.description)
output['Ether type'] = acl_rule.ether_type
output['Protocol'] = acl_rule.protocol
output['Source type'] = acl_rule.location_type
if acl_rule.location_type and nc.user.fetcher_for_rest_name(acl_rule.location_type.lower()) is not None:
output['Source name'] = nc.user.fetcher_for_rest_name(acl_rule.location_type.lower()).get_first(filter='ID == "%s"' % acl_rule.location_id).name
output['Destination type'] = acl_rule.network_type
if acl_rule.network_type and nc.user.fetcher_for_rest_name(acl_rule.network_type.lower()) is not None:
output['Destination name'] = nc.user.fetcher_for_rest_name(acl_rule.network_type.lower()).get_first(filter='ID == "%s"' % acl_rule.network_id).name
output['Source port'] = acl_rule.source_port
output['Destination port'] = acl_rule.destination_port
output['DSCP'] = acl_rule.dscp
output['Stateful'] = acl_rule.stateful
output['Action'] = acl_rule.action
handle_output(output=output)
logger.debug('Handling Redirect policies entries')
for entry in forward_acl_entries:
acl_rule = None
logger.debug('Using minimal information from the policy decision entry itself')
output = {
'VM Name': vm_interface.parent.name,
'Interface MAC': vm_interface.mac,
'ACL type': 'Forward',
'Ether type': entry['etherType'],
'Protocol': entry['protocol'],
'Source type': 'VM',
'Source name': vm_interface.parent.name,
'Destination type': entry['destinationType'],
'Destination name': entry['destinationValue'],
'Source port': entry['sourcePort'],
'Destination port': entry['destinationPort'],
'DSCP': entry['DSCP'],
'Stateful': '',
'Action': entry['actionDetails']['actionType']
}
if entry['ingressAdvFwdTemplateEntryId']:
logger.debug('Finding the actual Ingress Advanced ACL Template Entry to use its data')
# We are using this approach with the Stats ID as the ingressAdvFwdTemplateEntryId points to the stats ID of an Ingress/Egress ACL Entry Template in the current version (bug report generated)
acl_rule = nc.user.ingress_adv_fwd_entry_templates.get_first(filter='statsID == "%s"' % entry['ingressAdvFwdTemplateEntryId'])
if acl_rule:
logger.debug('Found a matching Ingress Advanced ACL Template Entry: %s' % acl_rule.description)
output['Ether type'] = acl_rule.ether_type
output['Protocol'] = acl_rule.protocol
output['Source type'] = acl_rule.location_type
if acl_rule.location_type and nc.user.fetcher_for_rest_name(acl_rule.location_type.lower()) is not None:
output['Source name'] = nc.user.fetcher_for_rest_name(acl_rule.location_type.lower()).get_first(filter='ID == "%s"' % acl_rule.location_id).name
output['Destination type'] = acl_rule.network_type
if acl_rule.network_type and nc.user.fetcher_for_rest_name(acl_rule.network_type.lower()) is not None:
output['Destination name'] = nc.user.fetcher_for_rest_name(acl_rule.network_type.lower()).get_first(filter='ID == "%s"' % acl_rule.network_id).name
output['Source port'] = acl_rule.source_port
output['Destination port'] = acl_rule.destination_port
output['DSCP'] = acl_rule.dscp
output['Action'] = acl_rule.action
handle_output(output=output)
logger.debug('Printing output')
if configuration['json_output']:
print(json.dumps(output_parser, sort_keys=True, indent=4))
else:
print(output_parser.get_string())
return 0
# Start program
if __name__ == "__main__":
main()
| bsd-3-clause | 6,962,605,574,775,154,000 | 38.030411 | 268 | 0.586305 | false |
offlinehacker/flumotion | flumotion/component/bouncers/icalbouncer.py | 1 | 1286 | # -*- Mode: Python; test-case-name: flumotion.test.test_icalbouncer -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
"""
A bouncer that only lets in during an event scheduled with an ical file.
"""
from flumotion.component.bouncers import multibouncer
from flumotion.component.bouncers.algorithms import icalbouncer
__all__ = ['IcalBouncer']
__version__ = "$Rev$"
class IcalBouncer(multibouncer.StaticMultiBouncer):
algorithmClasses = icalbouncer.IcalBouncerAlgorithm
| gpl-2.0 | 1,143,490,352,990,887,400 | 35.742857 | 74 | 0.775272 | false |
kidaa/pySDC | examples/fenics_grayscott/playground.py | 1 | 2625 |
from pySDC import CollocationClasses as collclass
from examples.fenics_grayscott.ProblemClass import fenics_grayscott
from pySDC.datatype_classes.fenics_mesh import fenics_mesh
from examples.fenics_grayscott.TransferClass import mesh_to_mesh_fenics
from examples.fenics_grayscott.HookClass import fenics_output
from pySDC.sweeper_classes.generic_LU import generic_LU
import pySDC.PFASST_blockwise as mp
# import pySDC.PFASST_stepwise as mp
from pySDC import Log
from pySDC.Stats import grep_stats, sort_stats
import dolfin as df
import numpy as np
if __name__ == "__main__":
# set global logger (remove this if you do not want the output at all)
logger = Log.setup_custom_logger('root')
num_procs = 1
# assert num_procs == 1,'turn on predictor!'
# This comes as read-in for the level class
lparams = {}
lparams['restol'] = 1E-07
sparams = {}
sparams['maxiter'] = 20
sparams['fine_comm'] = True
# This comes as read-in for the problem class
pparams = {}
pparams['Du'] = 1.0
pparams['Dv'] = 0.01
pparams['A'] = 0.01
pparams['B'] = 0.10
# pparams['Du'] = 2E-05
# pparams['Dv'] = 1E-05
# pparams['A'] = 0.03
# pparams['B'] = 0.092
pparams['t0'] = 0.0 # ugly, but necessary to set up ProblemClass
# pparams['c_nvars'] = [(16,16)]
pparams['c_nvars'] = [256]
pparams['family'] = 'CG'
pparams['order'] = [4]
pparams['refinements'] = [1]
# This comes as read-in for the transfer operations
tparams = {}
tparams['finter'] = True
# Fill description dictionary for easy hierarchy creation
description = {}
description['problem_class'] = fenics_grayscott
description['problem_params'] = pparams
description['dtype_u'] = fenics_mesh
description['dtype_f'] = fenics_mesh
description['collocation_class'] = collclass.CollGaussRadau_Right
description['num_nodes'] = 3
description['sweeper_class'] = generic_LU
description['level_params'] = lparams
description['transfer_class'] = mesh_to_mesh_fenics
description['transfer_params'] = tparams
description['hook_class'] = fenics_output
# quickly generate block of steps
MS = mp.generate_steps(num_procs,sparams,description)
# setup parameters "in time"
t0 = MS[0].levels[0].prob.t0
dt = 5.0
Tend = 400
# get initial values on finest level
P = MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend,stats = mp.run_pfasst(MS,u0=uinit,t0=t0,dt=dt,Tend=Tend)
# u1,u2 = df.split(uend.values)
# df.plot(u1,interactive=True)
| bsd-2-clause | 7,235,849,777,505,713,000 | 28.494382 | 74 | 0.663619 | false |
tensorflow/neural-structured-learning | research/gam/gam/models/gcn.py | 1 | 12450 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Graph Convolution Networks implementation adapted from https://github.com/tkipf/gcn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .models_base import glorot
from .models_base import Model
import tensorflow as tf
# Global unique layer ID dictionary for layer name assignment.
_LAYER_UIDS = {}
def get_layer_uid(layer_name=''):
"""Helper function, assigns unique layer IDs."""
if layer_name not in _LAYER_UIDS:
_LAYER_UIDS[layer_name] = 1
return 1
else:
_LAYER_UIDS[layer_name] += 1
return _LAYER_UIDS[layer_name]
def sparse_dropout(x, keep_prob, noise_shape):
"""Dropout for sparse tensors."""
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape)
dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
pre_out = tf.sparse_retain(x, dropout_mask)
return tf.SparseTensor(
indices=pre_out.indices,
values=pre_out.values / keep_prob,
dense_shape=pre_out.dense_shape)
def dot(x, y, sparse=False):
"""Wrapper for tf.matmul (sparse vs dense)."""
if sparse:
res = tf.sparse_tensor_dense_matmul(x, y)
else:
res = tf.matmul(x, y)
return res
class GCN(Model):
"""Graph Convolution Networks.
Attributes:
input_dim: Integer representing the number of input features.
output_dim: Integer representing the number of classes.
hidden: Integer representing the number of hidden units in the first layer
of the network.
dropout: Float representing the dropout probability during training.
aggregation: String representing an aggregation operation, that is applied
on the two inputs of the agreement model, after they are encoded through
the convolution layers. See superclass attributes for details.
activation: An activation function to be applied to the outputs of each
fully connected layer of the aggregation network.
    is_binary_classification: Boolean specifying if this is a model for binary
classification. If so, it uses a different loss function and returns
predictions with a single dimension, batch size.
name: String representing the model name.
"""
def __init__(self,
input_dim,
output_dim,
hidden,
dropout=0.5,
aggregation=None,
hidden_aggregation=(),
activation=tf.nn.leaky_relu,
is_binary_classification=False,
name='GCN'):
super(GCN, self).__init__(
aggregation=aggregation,
hidden_aggregation=hidden_aggregation,
activation=activation)
dropout = 0.5 if dropout is None else dropout
self.input_dim = input_dim
self.output_dim = output_dim
self.num_supports = 1
self.hidden = hidden
self.dropout = dropout
self.name = name
self.is_binary_classification = is_binary_classification
def get_encoding_and_params(self, inputs, is_train, support,
num_features_nonzero, **unused_kwargs):
"""Creates the model hidden representations and prediction ops.
    For this model, the hidden representation is the output of the first graph
    convolution layer, before the logit computation. The predictions are
    unnormalized logits.
Args:
inputs: A tensor containing the model inputs. The first dimension is the
batch size.
is_train: A boolean placeholder specifying if this is a training or
testing setting.
support: TODO(dattias, kvis-google): add.
num_features_nonzero: Number of non-zero features.
**unused_kwargs: Other unused keyword arguments.
Returns:
encoding: A tensor containing an encoded batch of samples. The first
dimension corresponds to the batch size.
all_vars: A dictionary mapping from variable name to TensorFlow op
containing all variables used in this model.
reg_params: A dictionary mapping from a variable name to a Tensor of
parameters which will be used for regularization.
"""
# Build layers.
with tf.variable_scope(self.name + '/encoding'):
hidden, reg_params = self._construct_encoding(inputs, is_train, support,
num_features_nonzero)
# Store model variables for easy access.
variables = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES,
scope=tf.get_default_graph().get_name_scope())
all_vars = {var.name: var for var in variables}
return hidden, all_vars, reg_params
def _construct_encoding(self, inputs, is_train, support,
num_features_nonzero):
"""Create weight variables."""
dropout = (
tf.constant(self.dropout, tf.float32) * tf.cast(is_train, tf.float32))
layer_1 = GraphConvolution(
input_dim=self.input_dim,
output_dim=self.hidden,
activation=tf.nn.relu,
dropout=dropout,
sparse_inputs=True,
num_features_nonzero=num_features_nonzero,
support=support,
name='GraphConvolution1')
encoding = layer_1(inputs)
reg_params = layer_1.vars
return encoding, reg_params
def get_predictions_and_params(self, encoding, is_train, **kwargs):
"""Creates the model prediction op.
    For this model, the hidden representation is the output of the first graph
    convolution layer, before the logit computation. The predictions are
    unnormalized logits.
Args:
      encoding: A tensor containing the encoded samples (the hidden
        representation). The first dimension is the batch size.
is_train: A placeholder representing a boolean value that specifies if
this model will be used for training or for test.
**kwargs: Other keyword arguments.
Returns:
predictions: A tensor of logits. For multiclass classification its
shape is (num_samples, num_classes), where the second dimension contains
a logit per class. For binary classification, its shape is
(num_samples,), where each element is the probability of class 1 for
that sample.
all_vars: A dictionary mapping from variable name to TensorFlow op
containing all variables used in this model.
reg_params: A dictionary mapping from a variable name to a Tensor of
parameters which will be used for regularization.
"""
reg_params = {}
support = kwargs['support']
num_features_nonzero = kwargs['num_features_nonzero']
# Build layers.
with tf.variable_scope(self.name + '/prediction'):
dropout = (
tf.constant(self.dropout, tf.float32) * tf.cast(is_train, tf.float32))
layer_2 = GraphConvolution(
input_dim=self.hidden,
output_dim=self.output_dim,
activation=lambda x: x,
dropout=dropout,
num_features_nonzero=num_features_nonzero,
support=support,
name='GraphConvolution2')
predictions = layer_2(encoding)
if self.is_binary_classification:
predictions = predictions[:, 0]
# Store model variables for easy access.
variables = tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES,
scope=tf.get_default_graph().get_name_scope())
all_vars = {var.name: var for var in variables}
return predictions, all_vars, reg_params
def get_loss(self,
predictions,
targets,
name_scope='loss',
reg_params=None,
**kwargs):
"""Returns a loss between the provided targets and predictions.
For binary classification, this loss is sigmoid cross entropy. For
multi-class classification, it is softmax cross entropy.
A weight decay loss is also added to the parameters passed in reg_params.
Args:
predictions: A tensor of predictions. For multiclass classification its
shape is (num_samples, num_classes), where the second dimension contains
a logit per class. For binary classification, its shape is
(num_samples,), where each element is the probability of class 1 for
that sample.
targets: A tensor of targets of shape (num_samples,), where each row
contains the label index of the corresponding sample.
name_scope: A string containing the name scope used in TensorFlow.
      reg_params: A dictionary of parameters, mapping from name to parameter, for
the variables to be included in the weight decay loss. If None, no
weight decay is applied.
**kwargs: Keyword arguments, potentially containing the weight of the
        regularization term, passed under the name `weight_decay`. If this is
        not provided, no weight decay is applied.
Returns:
      loss: The accumulated loss value.
"""
reg_params = reg_params if reg_params is not None else {}
weight_decay = kwargs['weight_decay'] if 'weight_decay' in kwargs else None
with tf.name_scope(name_scope):
# Cross entropy error.
if self.is_binary_classification:
loss = tf.reduce_sum(
tf.nn.sigmoid_cross_entropy_with_logits(
labels=targets, logits=predictions))
else:
loss = tf.losses.softmax_cross_entropy(targets, predictions)
# Weight decay loss.
if weight_decay is not None:
for var in reg_params.values():
loss = loss + weight_decay * tf.nn.l2_loss(var)
return loss
def normalize_predictions(self, predictions):
"""Converts predictions to probabilities.
Args:
predictions: A tensor of logits. For multiclass classification its shape
is (num_samples, num_classes), where the second dimension contains a
logit per class. For binary classification, its shape is (num_samples,),
where each element is the probability of class 1 for that sample.
Returns:
A tensor of the same shape as predictions, with values between [0, 1]
representing probabilities.
"""
if self.is_binary_classification:
return tf.nn.sigmoid(predictions)
return tf.nn.softmax(predictions, axis=-1)
class GraphConvolution(object):
"""Graph convolution layer."""
def __init__(self,
input_dim,
output_dim,
support,
num_features_nonzero,
dropout=0.,
sparse_inputs=False,
activation=tf.nn.relu,
bias=False,
featureless=False,
name=None):
if not name:
layer = self.__class__.__name__.lower()
name = layer + '_' + str(get_layer_uid(layer))
self.name = name
self.vars = {}
self.dropout = dropout
self.act = activation
self.support = support
self.sparse_inputs = sparse_inputs
self.featureless = featureless
self.bias = bias
# Helper variable for sparse dropout.
self.num_features_nonzero = num_features_nonzero
with tf.variable_scope(self.name + '_vars'):
self.vars['weights'] = tf.get_variable(
name='weights', initializer=glorot([input_dim, output_dim]))
if self.bias:
self.vars['bias'] = tf.get_variable(
name='bias', initializer=tf.zeros(shape=[output_dim]))
def __call__(self, inputs):
with tf.name_scope(self.name):
outputs = self._call(inputs)
return outputs
def _call(self, inputs):
"""Run over inputs."""
x = inputs
# Dropout.
if self.sparse_inputs:
x = sparse_dropout(x, 1 - self.dropout, self.num_features_nonzero)
else:
x = tf.nn.dropout(x, 1 - self.dropout)
# Convolve.
if not self.featureless:
pre_sup = dot(x, self.vars['weights'], sparse=self.sparse_inputs)
else:
pre_sup = self.vars['weights']
support = dot(self.support, pre_sup, sparse=True)
output = support
# Bias.
if self.bias:
output += self.vars['bias']
return self.act(output)
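# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal, hedged example of wiring the GraphConvolution layer defined
# above. The tensor names (`features`, `support`, `num_nonzero`) and the
# dimensions are assumptions for illustration only; in practice they come
# from the data pipeline that feeds this model.
#
#   layer = GraphConvolution(
#       input_dim=1433,
#       output_dim=16,
#       support=support,                   # normalized adjacency (sparse)
#       num_features_nonzero=num_nonzero,  # required for sparse dropout
#       dropout=0.5,
#       sparse_inputs=True,
#       activation=tf.nn.relu,
#       name='ExampleGraphConvolution')
#   hidden = layer(features)               # features: sparse feature tensor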
| apache-2.0 | -7,927,998,227,653,839,000 | 35.086957 | 90 | 0.658233 | false |
google/struct2tensor | struct2tensor/expression_impl/promote_test.py | 1 | 23374 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for struct2tensor.promote."""
from struct2tensor import create_expression
from struct2tensor import path
from struct2tensor.expression_impl import promote
from struct2tensor.test import expression_test_util
from struct2tensor.test import prensor_test_util
import tensorflow as tf
from absl.testing import absltest
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
from tensorflow_metadata.proto.v0 import schema_pb2
class PromoteTest(absltest.TestCase):
def assertLen(self, arr, expected_len):
self.assertEqual(len(arr), expected_len) # pylint:disable=g-generic-assert
def test_promote_anonymous(self):
expr = create_expression.create_expression_from_prensor(
prensor_test_util.create_nested_prensor())
new_root, new_field = promote.promote_anonymous(
expr, path.Path(["user", "friends"]))
new_field = new_root.get_descendant_or_error(new_field)
self.assertTrue(new_field.is_repeated)
self.assertEqual(new_field.type, tf.string)
self.assertTrue(new_field.is_leaf)
self.assertFalse(new_field.calculation_is_identity())
self.assertTrue(new_field.calculation_equal(new_field))
self.assertFalse(new_field.calculation_equal(expr))
leaf_node = expression_test_util.calculate_value_slowly(new_field)
self.assertEqual(leaf_node.values.dtype, tf.string)
self.assertEqual(new_field.known_field_names(), frozenset())
sources = new_field.get_source_expressions()
self.assertLen(sources, 2)
self.assertIs(
expr.get_descendant_or_error(path.Path(["user", "friends"])),
sources[0])
self.assertIs(expr.get_child_or_error("user"), sources[1])
def test_promote_with_schema(self):
expr = create_expression.create_expression_from_prensor(
prensor_test_util.create_big_prensor()).apply_schema(
prensor_test_util.create_big_prensor_schema())
new_root, new_field = promote.promote_anonymous(
expr, path.Path(["user", "friends"]))
new_field = new_root.get_descendant_or_error(new_field)
new_schema_feature = new_field.schema_feature
self.assertIsNotNone(new_schema_feature)
self.assertEqual(new_schema_feature.string_domain.value[0], "a")
def test_promote_with_schema_dense_parent(self):
s = prensor_test_util.create_big_prensor_schema()
feature_dict = {feature.name: feature for feature in s.feature}
user_feature = feature_dict["user"]
user_feature.value_count.min = 3
user_feature.value_count.max = 3
user_feature.presence.min_fraction = 1
user_feature.lifecycle_stage = schema_pb2.LifecycleStage.ALPHA
user_dict = {
feature.name: feature for feature in user_feature.struct_domain.feature
}
friends_feature = user_dict["friends"]
friends_feature.value_count.min = 2
friends_feature.value_count.max = 2
friends_feature.presence.min_fraction = 1
friends_feature.presence.min_count = 10
friends_feature.lifecycle_stage = schema_pb2.LifecycleStage.BETA
friends_feature.distribution_constraints.min_domain_mass = 0.5
expr = create_expression.create_expression_from_prensor(
prensor_test_util.create_big_prensor()).apply_schema(s)
new_root, new_field = promote.promote_anonymous(
expr, path.Path(["user", "friends"]))
new_field = new_root.get_descendant_or_error(new_field)
new_schema_feature = new_field.schema_feature
self.assertIsNotNone(new_schema_feature)
self.assertEqual(new_schema_feature.string_domain.value[0], "a")
self.assertEqual(new_schema_feature.value_count.max, 6)
self.assertEqual(new_schema_feature.value_count.min, 6)
self.assertEqual(new_schema_feature.presence.min_fraction, 1)
self.assertEqual(new_schema_feature.presence.min_count, 3)
self.assertEqual(new_schema_feature.lifecycle_stage,
schema_pb2.LifecycleStage.ALPHA)
self.assertEqual(
new_schema_feature.distribution_constraints.min_domain_mass, 0.5)
def test_lifecycle_stage(self):
# Stages have the following priority, from lowest to highest:
# schema_pb2.LifecycleStage.DEPRECATED
# schema_pb2.LifecycleStage.DISABLED
# schema_pb2.LifecycleStage.PLANNED,
# schema_pb2.LifecycleStage.ALPHA
# schema_pb2.LifecycleStage.DEBUG_ONLY,
# None
# schema_pb2.LifecycleStage.UNKNOWN_STAGE,
# schema_pb2.LifecycleStage.BETA
# schema_pb2.LifecycleStage.PRODUCTION
def _check_lifecycle_stage(a, b):
s = prensor_test_util.create_big_prensor_schema()
feature_dict = {feature.name: feature for feature in s.feature}
user_feature = feature_dict["user"]
if a is not None:
user_feature.lifecycle_stage = a
user_dict = {
feature.name: feature
for feature in user_feature.struct_domain.feature
}
friends_feature = user_dict["friends"]
if b is not None:
friends_feature.lifecycle_stage = b
expr = create_expression.create_expression_from_prensor(
prensor_test_util.create_big_prensor()).apply_schema(s)
new_root, new_field = promote.promote_anonymous(
expr, path.Path(["user", "friends"]))
new_field = new_root.get_descendant_or_error(new_field)
return new_field.schema_feature.lifecycle_stage
self.assertEqual(
schema_pb2.LifecycleStage.DEPRECATED,
_check_lifecycle_stage(schema_pb2.LifecycleStage.DEPRECATED,
schema_pb2.LifecycleStage.DISABLED))
self.assertEqual(
schema_pb2.LifecycleStage.DEPRECATED,
_check_lifecycle_stage(schema_pb2.LifecycleStage.DISABLED,
schema_pb2.LifecycleStage.DEPRECATED))
self.assertEqual(
schema_pb2.LifecycleStage.DISABLED,
_check_lifecycle_stage(schema_pb2.LifecycleStage.PLANNED,
schema_pb2.LifecycleStage.DISABLED))
self.assertEqual(
schema_pb2.LifecycleStage.DEPRECATED,
_check_lifecycle_stage(schema_pb2.LifecycleStage.PLANNED,
schema_pb2.LifecycleStage.DEPRECATED))
self.assertEqual(
schema_pb2.LifecycleStage.PLANNED,
_check_lifecycle_stage(schema_pb2.LifecycleStage.PLANNED,
schema_pb2.LifecycleStage.ALPHA))
self.assertEqual(
schema_pb2.LifecycleStage.PLANNED,
_check_lifecycle_stage(schema_pb2.LifecycleStage.ALPHA,
schema_pb2.LifecycleStage.PLANNED))
self.assertEqual(
schema_pb2.LifecycleStage.ALPHA,
_check_lifecycle_stage(schema_pb2.LifecycleStage.DEBUG_ONLY,
schema_pb2.LifecycleStage.ALPHA))
self.assertEqual(
schema_pb2.LifecycleStage.ALPHA,
_check_lifecycle_stage(schema_pb2.LifecycleStage.ALPHA,
schema_pb2.LifecycleStage.DEBUG_ONLY))
self.assertEqual(
schema_pb2.LifecycleStage.DEBUG_ONLY,
_check_lifecycle_stage(schema_pb2.LifecycleStage.DEBUG_ONLY, None))
self.assertEqual(
schema_pb2.LifecycleStage.DEBUG_ONLY,
_check_lifecycle_stage(None, schema_pb2.LifecycleStage.DEBUG_ONLY))
# None looks like UNKNOWN_STAGE.
self.assertEqual(
schema_pb2.LifecycleStage.UNKNOWN_STAGE,
_check_lifecycle_stage(None, schema_pb2.LifecycleStage.UNKNOWN_STAGE))
self.assertEqual(
schema_pb2.LifecycleStage.UNKNOWN_STAGE,
_check_lifecycle_stage(schema_pb2.LifecycleStage.UNKNOWN_STAGE, None))
self.assertEqual(
schema_pb2.LifecycleStage.UNKNOWN_STAGE,
_check_lifecycle_stage(schema_pb2.LifecycleStage.BETA,
schema_pb2.LifecycleStage.UNKNOWN_STAGE))
self.assertEqual(
schema_pb2.LifecycleStage.UNKNOWN_STAGE,
_check_lifecycle_stage(schema_pb2.LifecycleStage.UNKNOWN_STAGE,
schema_pb2.LifecycleStage.BETA))
self.assertEqual(
schema_pb2.LifecycleStage.BETA,
_check_lifecycle_stage(schema_pb2.LifecycleStage.BETA,
schema_pb2.LifecycleStage.PRODUCTION))
self.assertEqual(
schema_pb2.LifecycleStage.BETA,
_check_lifecycle_stage(schema_pb2.LifecycleStage.PRODUCTION,
schema_pb2.LifecycleStage.BETA))
def test_promote_with_schema_dense_fraction(self):
"""Test when min_fraction is not 1."""
s = prensor_test_util.create_big_prensor_schema()
feature_dict = {feature.name: feature for feature in s.feature}
user_feature = feature_dict["user"]
user_feature.value_count.min = 3
user_feature.value_count.max = 3
user_feature.presence.min_fraction = 1
user_dict = {
feature.name: feature for feature in user_feature.struct_domain.feature
}
friends_feature = user_dict["friends"]
friends_feature.presence.min_fraction = 0.9
expr = create_expression.create_expression_from_prensor(
prensor_test_util.create_big_prensor()).apply_schema(s)
new_root, new_field = promote.promote_anonymous(
expr, path.Path(["user", "friends"]))
new_field = new_root.get_descendant_or_error(new_field)
new_schema_feature = new_field.schema_feature
self.assertIsNotNone(new_schema_feature)
self.assertEqual(new_schema_feature.presence.min_fraction, 0.3)
def test_promote_optional_child_of_repeated(self):
expr = create_expression.create_expression_from_prensor(
prensor_test_util.create_nested_prensor())
new_root, new_field = promote.promote_anonymous(
expr, path.Path(["doc", "keep_me"]))
new_expr = new_root.get_descendant_or_error(new_field)
self.assertTrue(new_expr.is_repeated)
def test_promote(self):
"""Tests promote.promote(...), and indirectly tests set_path."""
expr = create_expression.create_expression_from_prensor(
prensor_test_util.create_nested_prensor())
new_root = promote.promote(expr, path.Path(["user", "friends"]),
"new_field")
new_field = new_root.get_child_or_error("new_field")
self.assertIsNotNone(new_field)
self.assertTrue(new_field.is_repeated)
self.assertEqual(new_field.type, tf.string)
self.assertTrue(new_field.is_leaf)
leaf_node = expression_test_util.calculate_value_slowly(new_field)
self.assertEqual(leaf_node.values.dtype, tf.string)
self.assertEqual(new_field.known_field_names(), frozenset())
def test_promote_substructure(self):
"""Tests promote.promote(...) of substructure."""
expr = create_expression.create_expression_from_prensor(
prensor_test_util.create_deep_prensor())
new_root = promote.promote(expr, path.Path(["event", "doc"]), "new_field")
new_field = new_root.get_child_or_error("new_field")
self.assertIsNotNone(new_field)
self.assertTrue(new_field.is_repeated)
self.assertEqual(new_field.known_field_names(),
frozenset(["bar", "keep_me"]))
bar_expr = new_field.get_child_or_error("bar")
self.assertIsNotNone(bar_expr)
self.assertTrue(bar_expr.is_repeated)
self.assertEqual(bar_expr.type, tf.string)
self.assertTrue(bar_expr.is_leaf)
keep_me_expr = new_field.get_child_or_error("keep_me")
self.assertIsNotNone(keep_me_expr)
self.assertFalse(keep_me_expr.is_repeated)
self.assertEqual(keep_me_expr.type, tf.bool)
self.assertTrue(keep_me_expr.is_leaf)
child_node = expression_test_util.calculate_value_slowly(new_field)
self.assertEqual(child_node.size, 3)
self.assertTrue(child_node.is_repeated)
bar_node = expression_test_util.calculate_value_slowly(bar_expr)
self.assertEqual(bar_node.values.dtype, tf.string)
keep_me_node = expression_test_util.calculate_value_slowly(keep_me_expr)
self.assertEqual(keep_me_node.values.dtype, tf.bool)
def test_promote_substructure_then_leaf(self):
"""Tests expr.promote(...) of substructure and then a leaf."""
expr = create_expression.create_expression_from_prensor(
prensor_test_util.create_deep_prensor())
new_root = (expr
.promote(path.Path(["event", "doc"]), "new_field")
.promote(path.Path(["new_field", "bar"]), "new_bar"))
new_bar = new_root.get_child_or_error("new_bar")
self.assertIsNotNone(new_bar)
self.assertTrue(new_bar.is_repeated)
self.assertEqual(new_bar.type, tf.string)
self.assertTrue(new_bar.is_leaf)
new_field_bar = new_root.get_descendant_or_error(
path.Path(["new_field", "bar"]))
self.assertIsNotNone(new_field_bar)
self.assertTrue(new_bar.is_repeated)
self.assertEqual(new_bar.type, tf.string)
self.assertTrue(new_bar.is_leaf)
new_field_keep_me = new_root.get_descendant_or_error(
path.Path(["new_field", "keep_me"]))
self.assertIsNotNone(new_field_keep_me)
self.assertFalse(new_field_keep_me.is_repeated)
self.assertEqual(new_field_keep_me.type, tf.bool)
self.assertTrue(new_field_keep_me.is_leaf)
bar_node = expression_test_util.calculate_value_slowly(new_bar)
self.assertEqual(bar_node.values.dtype, tf.string)
new_field_bar_node = expression_test_util.calculate_value_slowly(
new_field_bar)
self.assertEqual(new_field_bar_node.values.dtype, tf.string)
new_field_keep_me_node = expression_test_util.calculate_value_slowly(
new_field_keep_me)
self.assertEqual(new_field_keep_me_node.values.dtype, tf.bool)
def test_promote_leaf_then_substructure(self):
"""Tests expr.promote(...) of leaf and then a substructure."""
expr = create_expression.create_expression_from_prensor(
prensor_test_util.create_four_layer_prensor())
new_root = (
expr
.promote(path.Path(["event", "doc", "nested_child", "bar"]), "new_bar")
.promote(path.Path(["event", "doc"]), "new_doc"))
new_doc = new_root.get_child_or_error("new_doc")
self.assertIsNotNone(new_doc)
self.assertTrue(new_doc.is_repeated)
self.assertEqual(new_doc.known_field_names(),
frozenset(["nested_child", "new_bar"]))
new_bar_expr = new_doc.get_child_or_error("new_bar")
self.assertIsNotNone(new_bar_expr)
self.assertTrue(new_bar_expr.is_repeated)
self.assertEqual(new_bar_expr.type, tf.string)
self.assertTrue(new_bar_expr.is_leaf)
nested_child_expr = new_doc.get_child_or_error("nested_child")
self.assertIsNotNone(nested_child_expr)
self.assertTrue(nested_child_expr.is_repeated)
self.assertEqual(nested_child_expr.known_field_names(),
frozenset(["bar", "keep_me"]))
bar_expr = nested_child_expr.get_child_or_error("bar")
self.assertIsNotNone(bar_expr)
self.assertTrue(bar_expr.is_repeated)
self.assertEqual(bar_expr.type, tf.string)
self.assertTrue(bar_expr.is_leaf)
keep_me_expr = nested_child_expr.get_child_or_error("keep_me")
self.assertIsNotNone(keep_me_expr)
self.assertFalse(keep_me_expr.is_repeated)
self.assertEqual(keep_me_expr.type, tf.bool)
self.assertTrue(keep_me_expr.is_leaf)
bar_node = expression_test_util.calculate_value_slowly(new_bar_expr)
self.assertEqual(bar_node.values.dtype, tf.string)
@test_util.run_all_in_graph_and_eager_modes
class PromoteValuesTest(tf.test.TestCase):
def test_promote_and_calculate(self):
"""Tests promoting a leaf on a nested tree."""
expr = create_expression.create_expression_from_prensor(
prensor_test_util.create_nested_prensor())
new_root, new_path = promote.promote_anonymous(
expr, path.Path(["user", "friends"]))
new_field = new_root.get_descendant_or_error(new_path)
leaf_node = expression_test_util.calculate_value_slowly(new_field)
self.assertAllEqual(leaf_node.parent_index, [0, 1, 1, 1, 2])
self.assertAllEqual(leaf_node.values, [b"a", b"b", b"c", b"d", b"e"])
def test_promote_and_calculate_substructure(self):
"""Tests promoting substructure on a tree with depth of 4."""
expr = create_expression.create_expression_from_prensor(
prensor_test_util.create_four_layer_prensor())
new_root, new_path = promote.promote_anonymous(
expr, path.Path(["event", "doc", "nested_child"]))
new_nested_child = new_root.get_descendant_or_error(new_path)
bar_expr = new_root.get_descendant_or_error(new_path.get_child("bar"))
keep_me_expr = new_root.get_descendant_or_error(
new_path.get_child("keep_me"))
# the promoted nested_child's parent index is changed.
nested_child_node = expression_test_util.calculate_value_slowly(
new_nested_child)
self.assertAllEqual(nested_child_node.parent_index, [0, 1, 1, 1])
self.assertTrue(nested_child_node.is_repeated)
# bar's parent index should be unchanged.
bar_node = expression_test_util.calculate_value_slowly(bar_expr)
self.assertAllEqual(bar_node.parent_index, [0, 1, 1, 2])
self.assertAllEqual(bar_node.values, [b"a", b"b", b"c", b"d"])
self.assertTrue(bar_node.is_repeated)
# keep_me's parent index should be unchanged.
keep_me_node = expression_test_util.calculate_value_slowly(keep_me_expr)
self.assertAllEqual(keep_me_node.parent_index, [0, 1])
self.assertAllEqual(keep_me_node.values, [False, True])
self.assertFalse(keep_me_node.is_repeated)
def test_promote_and_calculate_substructure_then_leaf(self):
"""Tests promoting of substructure and then a leaf."""
expr = create_expression.create_expression_from_prensor(
prensor_test_util.create_four_layer_prensor())
new_root, new_nested_child_path = promote.promote_anonymous(
expr, path.Path(["event", "doc", "nested_child"]))
new_root, new_bar_path = promote.promote_anonymous(
new_root, new_nested_child_path.get_child("bar"))
# the promoted nested_child's parent index is changed.
new_nested_child = new_root.get_descendant_or_error(new_nested_child_path)
nested_child_node = expression_test_util.calculate_value_slowly(
new_nested_child)
self.assertAllEqual(nested_child_node.parent_index, [0, 1, 1, 1])
self.assertTrue(nested_child_node.is_repeated)
# promoted bar's parent index is changed.
new_bar = new_root.get_descendant_or_error(new_bar_path)
bar_node = expression_test_util.calculate_value_slowly(new_bar)
self.assertAllEqual(bar_node.parent_index, [0, 1, 1, 1])
self.assertAllEqual(bar_node.values, [b"a", b"b", b"c", b"d"])
self.assertTrue(bar_node.is_repeated)
# bar's parent index should be unchanged.
nested_child_bar = new_root.get_descendant_or_error(
new_nested_child_path.get_child("bar"))
nested_child_bar_node = expression_test_util.calculate_value_slowly(
nested_child_bar)
self.assertAllEqual(nested_child_bar_node.parent_index, [0, 1, 1, 2])
self.assertAllEqual(nested_child_bar_node.values, [b"a", b"b", b"c", b"d"])
self.assertTrue(nested_child_bar_node.is_repeated)
# keep_me's parent index should be unchanged.
nested_child_keep_me = new_root.get_descendant_or_error(
new_nested_child_path.get_child("keep_me"))
nested_child_keep_me_node = expression_test_util.calculate_value_slowly(
nested_child_keep_me)
self.assertAllEqual(nested_child_keep_me_node.parent_index, [0, 1])
self.assertAllEqual(nested_child_keep_me_node.values, [False, True])
self.assertFalse(nested_child_keep_me_node.is_repeated)
def test_promote_and_calculate_leaf_then_substructure(self):
"""Tests promoting of leaf and then a substructure."""
expr = create_expression.create_expression_from_prensor(
prensor_test_util.create_four_layer_prensor())
new_root, new_bar_path = promote.promote_anonymous(
expr, path.Path(["event", "doc", "nested_child", "bar"]))
new_root, new_path = promote.promote_anonymous(new_root,
path.Path(["event", "doc"]))
new_doc = new_root.get_descendant_or_error(new_path)
new_bar = new_root.get_descendant_or_error(
new_path.concat(new_bar_path.suffix(2)))
bar_expr = new_root.get_descendant_or_error(
new_path.concat(path.Path(["nested_child", "bar"])))
keep_me_expr = new_root.get_descendant_or_error(
new_path.concat(path.Path(["nested_child", "keep_me"])))
new_doc_node = expression_test_util.calculate_value_slowly(new_doc)
self.assertAllEqual(new_doc_node.parent_index, [0, 1, 1])
self.assertTrue(new_doc_node.is_repeated)
# new_bar's parent index is changed (from the first promote).
# The second promote should not change new_bar's parent index.
new_bar_node = expression_test_util.calculate_value_slowly(new_bar)
self.assertAllEqual(new_bar_node.parent_index, [0, 1, 1, 1])
self.assertAllEqual(new_bar_node.values, [b"a", b"b", b"c", b"d"])
self.assertTrue(new_bar_node.is_repeated)
# bar's parent index should be unchanged.
bar_node = expression_test_util.calculate_value_slowly(bar_expr)
self.assertAllEqual(bar_node.parent_index, [0, 1, 1, 2])
self.assertAllEqual(bar_node.values, [b"a", b"b", b"c", b"d"])
self.assertTrue(bar_node.is_repeated)
# keep_me's parent index should be unchanged.
keep_me_node = expression_test_util.calculate_value_slowly(keep_me_expr)
self.assertAllEqual(keep_me_node.parent_index, [0, 1])
self.assertAllEqual(keep_me_node.values, [False, True])
self.assertFalse(keep_me_node.is_repeated)
def test_promote_substructure_with_schema(self):
expr = create_expression.create_expression_from_prensor(
prensor_test_util.create_deep_prensor()).apply_schema(
prensor_test_util.create_deep_prensor_schema())
original_schema = expr.get_descendant_or_error(path.Path(["event", "doc"
])).schema_feature
new_root, new_field_path = promote.promote_anonymous(
expr, path.Path(["event", "doc"]))
new_field = new_root.get_descendant_or_error(new_field_path)
new_schema_feature = new_field.schema_feature
self.assertIsNotNone(new_schema_feature)
# The struct_domain of this feature should not be changed.
self.assertProtoEquals(new_schema_feature.struct_domain,
original_schema.struct_domain)
bar_schema = new_root.get_descendant_or_error(
new_field_path.concat(path.Path(["bar"]))).schema_feature
self.assertIsNotNone(bar_schema)
self.assertEqual(bar_schema.string_domain.value[0], "a")
keep_me_schema = new_root.get_descendant_or_error(
new_field_path.concat(path.Path(["keep_me"]))).schema_feature
self.assertIsNotNone(keep_me_schema)
self.assertEqual(keep_me_schema.presence.min_count, 1)
if __name__ == "__main__":
absltest.main()
| apache-2.0 | 8,708,046,950,446,265,000 | 43.185255 | 95 | 0.685634 | false |
plotly/plotly.py | packages/python/plotly/plotly/basewidget.py | 1 | 34379 | import uuid
from importlib import import_module
import os
import numbers
try:
from urllib import parse
except ImportError:
from urlparse import urlparse as parse
import ipywidgets as widgets
from traitlets import List, Unicode, Dict, observe, Integer
from .basedatatypes import BaseFigure, BasePlotlyType
from .callbacks import BoxSelector, LassoSelector, InputDeviceState, Points
from .serializers import custom_serializers
from .version import __frontend_version__
@widgets.register
class BaseFigureWidget(BaseFigure, widgets.DOMWidget):
"""
Base class for FigureWidget. The FigureWidget class is code-generated as a
subclass
"""
# Widget Traits
# -------------
# Widget traitlets are automatically synchronized with the FigureModel
# JavaScript object
_view_name = Unicode("FigureView").tag(sync=True)
_view_module = Unicode("jupyterlab-plotly").tag(sync=True)
_view_module_version = Unicode(__frontend_version__).tag(sync=True)
_model_name = Unicode("FigureModel").tag(sync=True)
_model_module = Unicode("jupyterlab-plotly").tag(sync=True)
_model_module_version = Unicode(__frontend_version__).tag(sync=True)
# ### _data and _layout ###
# These properties store the current state of the traces and
# layout as JSON-style dicts. These dicts do not store any subclasses of
# `BasePlotlyType`
#
# Note: These are only automatically synced with the frontend on full
# assignment, not on mutation. We use this fact to only directly sync
# them to the front-end on FigureWidget construction. All other updates
# are made using mutation, and they are manually synced to the frontend
# using the relayout/restyle/update/etc. messages.
_layout = Dict().tag(sync=True, **custom_serializers)
_data = List().tag(sync=True, **custom_serializers)
_config = Dict().tag(sync=True, **custom_serializers)
# ### Python -> JS message properties ###
# These properties are used to send messages from Python to the
# frontend. Messages are sent by assigning the message contents to the
    # appropriate _py2js_* property and then immediately assigning None to the
# property.
#
# See JSDoc comments in the FigureModel class in js/src/Figure.js for
# detailed descriptions of the messages.
_py2js_addTraces = Dict(allow_none=True).tag(sync=True, **custom_serializers)
_py2js_restyle = Dict(allow_none=True).tag(sync=True, **custom_serializers)
_py2js_relayout = Dict(allow_none=True).tag(sync=True, **custom_serializers)
_py2js_update = Dict(allow_none=True).tag(sync=True, **custom_serializers)
_py2js_animate = Dict(allow_none=True).tag(sync=True, **custom_serializers)
_py2js_deleteTraces = Dict(allow_none=True).tag(sync=True, **custom_serializers)
_py2js_moveTraces = Dict(allow_none=True).tag(sync=True, **custom_serializers)
_py2js_removeLayoutProps = Dict(allow_none=True).tag(
sync=True, **custom_serializers
)
_py2js_removeTraceProps = Dict(allow_none=True).tag(sync=True, **custom_serializers)
# ### JS -> Python message properties ###
# These properties are used to receive messages from the frontend.
# Messages are received by defining methods that observe changes to these
# properties. Receive methods are named `_handler_js2py_*` where '*' is
# the name of the corresponding message property. Receive methods are
    # responsible for setting the message property to None after retrieving
# the message data.
#
# See JSDoc comments in the FigureModel class in js/src/Figure.js for
# detailed descriptions of the messages.
_js2py_traceDeltas = Dict(allow_none=True).tag(sync=True, **custom_serializers)
_js2py_layoutDelta = Dict(allow_none=True).tag(sync=True, **custom_serializers)
_js2py_restyle = Dict(allow_none=True).tag(sync=True, **custom_serializers)
_js2py_relayout = Dict(allow_none=True).tag(sync=True, **custom_serializers)
_js2py_update = Dict(allow_none=True).tag(sync=True, **custom_serializers)
_js2py_pointsCallback = Dict(allow_none=True).tag(sync=True, **custom_serializers)
# ### Message tracking properties ###
# The _last_layout_edit_id and _last_trace_edit_id properties are used
# to keep track of the edit id of the message that most recently
    # requested an update to the Figure's layout or traces respectively.
#
# We track this information because we don't want to update the Figure's
# default layout/trace properties (_layout_defaults, _data_defaults)
# while edits are in process. This can lead to inconsistent property
# states.
_last_layout_edit_id = Integer(0).tag(sync=True)
_last_trace_edit_id = Integer(0).tag(sync=True)
_set_trace_uid = True
_allow_disable_validation = False
# Constructor
# -----------
def __init__(
self, data=None, layout=None, frames=None, skip_invalid=False, **kwargs
):
# Call superclass constructors
# ----------------------------
        # Note: We rename layout to layout_plotly to deconflict it
# with the `layout` constructor parameter of the `widgets.DOMWidget`
# ipywidgets class
super(BaseFigureWidget, self).__init__(
data=data,
layout_plotly=layout,
frames=frames,
skip_invalid=skip_invalid,
**kwargs
)
# Validate Frames
# ---------------
# Frames are not supported by figure widget
if self._frame_objs:
BaseFigureWidget._display_frames_error()
# Message States
# --------------
# ### Layout ###
# _last_layout_edit_id is described above
self._last_layout_edit_id = 0
# _layout_edit_in_process is set to True if there are layout edit
# operations that have been sent to the frontend that haven't
# completed yet.
self._layout_edit_in_process = False
# _waiting_edit_callbacks is a list of callback functions that
# should be executed as soon as all pending edit operations are
# completed
self._waiting_edit_callbacks = []
# ### Trace ###
# _last_trace_edit_id: described above
self._last_trace_edit_id = 0
# _trace_edit_in_process is set to True if there are trace edit
# operations that have been sent to the frontend that haven't
# completed yet.
self._trace_edit_in_process = False
# View count
# ----------
# ipywidget property that stores the number of active frontend
# views of this widget
self._view_count = 0
# Python -> JavaScript Messages
# -----------------------------
def _send_relayout_msg(self, layout_data, source_view_id=None):
"""
Send Plotly.relayout message to the frontend
Parameters
----------
layout_data : dict
Plotly.relayout layout data
source_view_id : str
UID of view that triggered this relayout operation
            (e.g. by the user clicking 'zoom' in the toolbar). None if the
operation was not triggered by a frontend view
"""
# Increment layout edit messages IDs
# ----------------------------------
layout_edit_id = self._last_layout_edit_id + 1
self._last_layout_edit_id = layout_edit_id
self._layout_edit_in_process = True
# Build message
# -------------
msg_data = {
"relayout_data": layout_data,
"layout_edit_id": layout_edit_id,
"source_view_id": source_view_id,
}
# Send message
# ------------
self._py2js_relayout = msg_data
self._py2js_relayout = None
def _send_restyle_msg(self, restyle_data, trace_indexes=None, source_view_id=None):
"""
Send Plotly.restyle message to the frontend
Parameters
----------
restyle_data : dict
Plotly.restyle restyle data
trace_indexes : list[int]
List of trace indexes that the restyle operation
applies to
source_view_id : str
UID of view that triggered this restyle operation
            (e.g. by the user clicking the legend to hide a trace).
None if the operation was not triggered by a frontend view
"""
# Validate / normalize inputs
# ---------------------------
trace_indexes = self._normalize_trace_indexes(trace_indexes)
# Increment layout/trace edit message IDs
# ---------------------------------------
layout_edit_id = self._last_layout_edit_id + 1
self._last_layout_edit_id = layout_edit_id
self._layout_edit_in_process = True
trace_edit_id = self._last_trace_edit_id + 1
self._last_trace_edit_id = trace_edit_id
self._trace_edit_in_process = True
# Build message
# -------------
restyle_msg = {
"restyle_data": restyle_data,
"restyle_traces": trace_indexes,
"trace_edit_id": trace_edit_id,
"layout_edit_id": layout_edit_id,
"source_view_id": source_view_id,
}
# Send message
# ------------
self._py2js_restyle = restyle_msg
self._py2js_restyle = None
def _send_addTraces_msg(self, new_traces_data):
"""
Send Plotly.addTraces message to the frontend
Parameters
----------
new_traces_data : list[dict]
List of trace data for new traces as accepted by Plotly.addTraces
"""
# Increment layout/trace edit message IDs
# ---------------------------------------
layout_edit_id = self._last_layout_edit_id + 1
self._last_layout_edit_id = layout_edit_id
self._layout_edit_in_process = True
trace_edit_id = self._last_trace_edit_id + 1
self._last_trace_edit_id = trace_edit_id
self._trace_edit_in_process = True
# Build message
# -------------
add_traces_msg = {
"trace_data": new_traces_data,
"trace_edit_id": trace_edit_id,
"layout_edit_id": layout_edit_id,
}
# Send message
# ------------
self._py2js_addTraces = add_traces_msg
self._py2js_addTraces = None
def _send_moveTraces_msg(self, current_inds, new_inds):
"""
Send Plotly.moveTraces message to the frontend
Parameters
----------
current_inds : list[int]
List of current trace indexes
new_inds : list[int]
List of new trace indexes
"""
# Build message
# -------------
move_msg = {"current_trace_inds": current_inds, "new_trace_inds": new_inds}
# Send message
# ------------
self._py2js_moveTraces = move_msg
self._py2js_moveTraces = None
def _send_update_msg(
self, restyle_data, relayout_data, trace_indexes=None, source_view_id=None
):
"""
Send Plotly.update message to the frontend
Parameters
----------
restyle_data : dict
Plotly.update restyle data
relayout_data : dict
Plotly.update relayout data
trace_indexes : list[int]
List of trace indexes that the update operation applies to
source_view_id : str
UID of view that triggered this update operation
            (e.g. by the user clicking a button).
None if the operation was not triggered by a frontend view
"""
# Validate / normalize inputs
# ---------------------------
trace_indexes = self._normalize_trace_indexes(trace_indexes)
# Increment layout/trace edit message IDs
# ---------------------------------------
trace_edit_id = self._last_trace_edit_id + 1
self._last_trace_edit_id = trace_edit_id
self._trace_edit_in_process = True
layout_edit_id = self._last_layout_edit_id + 1
self._last_layout_edit_id = layout_edit_id
self._layout_edit_in_process = True
# Build message
# -------------
update_msg = {
"style_data": restyle_data,
"layout_data": relayout_data,
"style_traces": trace_indexes,
"trace_edit_id": trace_edit_id,
"layout_edit_id": layout_edit_id,
"source_view_id": source_view_id,
}
# Send message
# ------------
self._py2js_update = update_msg
self._py2js_update = None
def _send_animate_msg(
self, styles_data, relayout_data, trace_indexes, animation_opts
):
"""
Send Plotly.update message to the frontend
Note: there is no source_view_id parameter because animations
        triggered by the frontend are not currently supported
Parameters
----------
styles_data : list[dict]
Plotly.animate styles data
relayout_data : dict
Plotly.animate relayout data
trace_indexes : list[int]
List of trace indexes that the animate operation applies to
"""
# Validate / normalize inputs
# ---------------------------
trace_indexes = self._normalize_trace_indexes(trace_indexes)
# Increment layout/trace edit message IDs
# ---------------------------------------
trace_edit_id = self._last_trace_edit_id + 1
self._last_trace_edit_id = trace_edit_id
self._trace_edit_in_process = True
layout_edit_id = self._last_layout_edit_id + 1
self._last_layout_edit_id = layout_edit_id
self._layout_edit_in_process = True
# Build message
# -------------
animate_msg = {
"style_data": styles_data,
"layout_data": relayout_data,
"style_traces": trace_indexes,
"animation_opts": animation_opts,
"trace_edit_id": trace_edit_id,
"layout_edit_id": layout_edit_id,
"source_view_id": None,
}
# Send message
# ------------
self._py2js_animate = animate_msg
self._py2js_animate = None
def _send_deleteTraces_msg(self, delete_inds):
"""
Send Plotly.deleteTraces message to the frontend
Parameters
----------
delete_inds : list[int]
List of trace indexes of traces to delete
"""
# Increment layout/trace edit message IDs
# ---------------------------------------
trace_edit_id = self._last_trace_edit_id + 1
self._last_trace_edit_id = trace_edit_id
self._trace_edit_in_process = True
layout_edit_id = self._last_layout_edit_id + 1
self._last_layout_edit_id = layout_edit_id
self._layout_edit_in_process = True
# Build message
# -------------
delete_msg = {
"delete_inds": delete_inds,
"layout_edit_id": layout_edit_id,
"trace_edit_id": trace_edit_id,
}
# Send message
# ------------
self._py2js_deleteTraces = delete_msg
self._py2js_deleteTraces = None
# JavaScript -> Python Messages
# -----------------------------
@observe("_js2py_traceDeltas")
def _handler_js2py_traceDeltas(self, change):
"""
Process trace deltas message from the frontend
"""
# Receive message
# ---------------
msg_data = change["new"]
if not msg_data:
self._js2py_traceDeltas = None
return
trace_deltas = msg_data["trace_deltas"]
trace_edit_id = msg_data["trace_edit_id"]
# Apply deltas
# ------------
# We only apply the deltas if this message corresponds to the most
# recent trace edit operation
if trace_edit_id == self._last_trace_edit_id:
# ### Loop over deltas ###
for delta in trace_deltas:
# #### Find existing trace for uid ###
trace_uid = delta["uid"]
trace_uids = [trace.uid for trace in self.data]
trace_index = trace_uids.index(trace_uid)
uid_trace = self.data[trace_index]
# #### Transform defaults to delta ####
delta_transform = BaseFigureWidget._transform_data(
uid_trace._prop_defaults, delta
)
# #### Remove overlapping properties ####
# If a property is present in both _props and _prop_defaults
# then we remove the copy from _props
remove_props = self._remove_overlapping_props(
uid_trace._props, uid_trace._prop_defaults
)
# #### Notify frontend model of property removal ####
if remove_props:
remove_trace_props_msg = {
"remove_trace": trace_index,
"remove_props": remove_props,
}
self._py2js_removeTraceProps = remove_trace_props_msg
self._py2js_removeTraceProps = None
# #### Dispatch change callbacks ####
self._dispatch_trace_change_callbacks(delta_transform, [trace_index])
# ### Trace edits no longer in process ###
self._trace_edit_in_process = False
# ### Call any waiting trace edit callbacks ###
if not self._layout_edit_in_process:
while self._waiting_edit_callbacks:
self._waiting_edit_callbacks.pop()()
self._js2py_traceDeltas = None
@observe("_js2py_layoutDelta")
def _handler_js2py_layoutDelta(self, change):
"""
Process layout delta message from the frontend
"""
# Receive message
# ---------------
msg_data = change["new"]
if not msg_data:
self._js2py_layoutDelta = None
return
layout_delta = msg_data["layout_delta"]
layout_edit_id = msg_data["layout_edit_id"]
# Apply delta
# -----------
# We only apply the delta if this message corresponds to the most
# recent layout edit operation
if layout_edit_id == self._last_layout_edit_id:
# ### Transform defaults to delta ###
delta_transform = BaseFigureWidget._transform_data(
self._layout_defaults, layout_delta
)
# ### Remove overlapping properties ###
# If a property is present in both _layout and _layout_defaults
# then we remove the copy from _layout
removed_props = self._remove_overlapping_props(
self._layout, self._layout_defaults
)
# ### Notify frontend model of property removal ###
if removed_props:
remove_props_msg = {"remove_props": removed_props}
self._py2js_removeLayoutProps = remove_props_msg
self._py2js_removeLayoutProps = None
# ### Create axis objects ###
# For example, when a SPLOM trace is created the layout defaults
# may include axes that weren't explicitly defined by the user.
for proppath in delta_transform:
prop = proppath[0]
match = self.layout._subplot_re_match(prop)
if match and prop not in self.layout:
# We need to create a subplotid object
self.layout[prop] = {}
# ### Dispatch change callbacks ###
self._dispatch_layout_change_callbacks(delta_transform)
# ### Layout edits no longer in process ###
self._layout_edit_in_process = False
# ### Call any waiting layout edit callbacks ###
if not self._trace_edit_in_process:
while self._waiting_edit_callbacks:
self._waiting_edit_callbacks.pop()()
self._js2py_layoutDelta = None
@observe("_js2py_restyle")
def _handler_js2py_restyle(self, change):
"""
Process Plotly.restyle message from the frontend
"""
# Receive message
# ---------------
restyle_msg = change["new"]
if not restyle_msg:
self._js2py_restyle = None
return
style_data = restyle_msg["style_data"]
style_traces = restyle_msg["style_traces"]
source_view_id = restyle_msg["source_view_id"]
# Perform restyle
# ---------------
self.plotly_restyle(
restyle_data=style_data,
trace_indexes=style_traces,
source_view_id=source_view_id,
)
self._js2py_restyle = None
@observe("_js2py_update")
def _handler_js2py_update(self, change):
"""
Process Plotly.update message from the frontend
"""
# Receive message
# ---------------
update_msg = change["new"]
if not update_msg:
self._js2py_update = None
return
style = update_msg["style_data"]
trace_indexes = update_msg["style_traces"]
layout = update_msg["layout_data"]
source_view_id = update_msg["source_view_id"]
# Perform update
# --------------
self.plotly_update(
restyle_data=style,
relayout_data=layout,
trace_indexes=trace_indexes,
source_view_id=source_view_id,
)
self._js2py_update = None
@observe("_js2py_relayout")
def _handler_js2py_relayout(self, change):
"""
Process Plotly.relayout message from the frontend
"""
# Receive message
# ---------------
relayout_msg = change["new"]
if not relayout_msg:
self._js2py_relayout = None
return
relayout_data = relayout_msg["relayout_data"]
source_view_id = relayout_msg["source_view_id"]
if "lastInputTime" in relayout_data:
# Remove 'lastInputTime'. Seems to be an internal plotly
# property that is introduced for some plot types, but it is not
# actually a property in the schema
relayout_data.pop("lastInputTime")
# Perform relayout
# ----------------
self.plotly_relayout(relayout_data=relayout_data, source_view_id=source_view_id)
self._js2py_relayout = None
@observe("_js2py_pointsCallback")
def _handler_js2py_pointsCallback(self, change):
"""
Process points callback message from the frontend
"""
# Receive message
# ---------------
callback_data = change["new"]
if not callback_data:
self._js2py_pointsCallback = None
return
# Get event type
# --------------
event_type = callback_data["event_type"]
# Build Selector Object
# ---------------------
if callback_data.get("selector", None):
selector_data = callback_data["selector"]
selector_type = selector_data["type"]
selector_state = selector_data["selector_state"]
if selector_type == "box":
selector = BoxSelector(**selector_state)
elif selector_type == "lasso":
selector = LassoSelector(**selector_state)
else:
raise ValueError("Unsupported selector type: %s" % selector_type)
else:
selector = None
# Build Input Device State Object
# -------------------------------
if callback_data.get("device_state", None):
device_state_data = callback_data["device_state"]
state = InputDeviceState(**device_state_data)
else:
state = None
# Build Trace Points Dictionary
# -----------------------------
points_data = callback_data["points"]
trace_points = {
trace_ind: {
"point_inds": [],
"xs": [],
"ys": [],
"trace_name": self._data_objs[trace_ind].name,
"trace_index": trace_ind,
}
for trace_ind in range(len(self._data_objs))
}
for x, y, point_ind, trace_ind in zip(
points_data["xs"],
points_data["ys"],
points_data["point_indexes"],
points_data["trace_indexes"],
):
trace_dict = trace_points[trace_ind]
trace_dict["xs"].append(x)
trace_dict["ys"].append(y)
trace_dict["point_inds"].append(point_ind)
# Dispatch callbacks
# ------------------
for trace_ind, trace_points_data in trace_points.items():
points = Points(**trace_points_data)
trace = self.data[trace_ind]
if event_type == "plotly_click":
trace._dispatch_on_click(points, state)
elif event_type == "plotly_hover":
trace._dispatch_on_hover(points, state)
elif event_type == "plotly_unhover":
trace._dispatch_on_unhover(points, state)
elif event_type == "plotly_selected":
trace._dispatch_on_selection(points, selector)
elif event_type == "plotly_deselect":
trace._dispatch_on_deselect(points)
self._js2py_pointsCallback = None
# Display
# -------
def _ipython_display_(self):
"""
Handle rich display of figures in ipython contexts
"""
# Override BaseFigure's display to make sure we display the widget version
widgets.DOMWidget._ipython_display_(self)
# Callbacks
# ---------
def on_edits_completed(self, fn):
"""
Register a function to be called after all pending trace and layout
edit operations have completed
If there are no pending edit operations then function is called
immediately
Parameters
----------
fn : callable
Function of zero arguments to be called when all pending edit
operations have completed
"""
if self._layout_edit_in_process or self._trace_edit_in_process:
self._waiting_edit_callbacks.append(fn)
else:
fn()
# Validate No Frames
# ------------------
@property
def frames(self):
# Note: This property getter is identical to that of the superclass,
# but it must be included here because we're overriding the setter
# below.
return self._frame_objs
@frames.setter
def frames(self, new_frames):
if new_frames:
BaseFigureWidget._display_frames_error()
@staticmethod
def _display_frames_error():
"""
Display an informative error when user attempts to set frames on a
FigureWidget
Raises
------
ValueError
always
"""
msg = """
Frames are not supported by the plotly.graph_objs.FigureWidget class.
Note: Frames are supported by the plotly.graph_objs.Figure class"""
raise ValueError(msg)
# Static Helpers
# --------------
@staticmethod
def _remove_overlapping_props(input_data, delta_data, prop_path=()):
"""
Remove properties in input_data that are also in delta_data, and do so
recursively.
        Exception: never remove 'uid' from input_data; this property is used
to align traces
Parameters
----------
input_data : dict|list
delta_data : dict|list
Returns
-------
list[tuple[str|int]]
List of removed property path tuples
"""
# Initialize removed
# ------------------
# This is the list of path tuples to the properties that were
# removed from input_data
removed = []
# Handle dict
# -----------
if isinstance(input_data, dict):
assert isinstance(delta_data, dict)
for p, delta_val in delta_data.items():
if isinstance(delta_val, dict) or BaseFigure._is_dict_list(delta_val):
if p in input_data:
# ### Recurse ###
input_val = input_data[p]
recur_prop_path = prop_path + (p,)
recur_removed = BaseFigureWidget._remove_overlapping_props(
input_val, delta_val, recur_prop_path
)
removed.extend(recur_removed)
# Check whether the last property in input_val
# has been removed. If so, remove it entirely
if not input_val:
input_data.pop(p)
removed.append(recur_prop_path)
elif p in input_data and p != "uid":
# ### Remove property ###
input_data.pop(p)
removed.append(prop_path + (p,))
# Handle list
# -----------
elif isinstance(input_data, list):
assert isinstance(delta_data, list)
for i, delta_val in enumerate(delta_data):
if i >= len(input_data):
break
input_val = input_data[i]
if (
input_val is not None
and isinstance(delta_val, dict)
or BaseFigure._is_dict_list(delta_val)
):
# ### Recurse ###
recur_prop_path = prop_path + (i,)
recur_removed = BaseFigureWidget._remove_overlapping_props(
input_val, delta_val, recur_prop_path
)
removed.extend(recur_removed)
return removed
@staticmethod
def _transform_data(to_data, from_data, should_remove=True, relayout_path=()):
"""
Transform to_data into from_data and return relayout-style
description of the transformation
Parameters
----------
to_data : dict|list
from_data : dict|list
Returns
-------
dict
relayout-style description of the transformation
"""
# Initialize relayout data
# ------------------------
relayout_data = {}
# Handle dict
# -----------
if isinstance(to_data, dict):
# ### Validate from_data ###
if not isinstance(from_data, dict):
raise ValueError(
"Mismatched data types: {to_dict} {from_data}".format(
to_dict=to_data, from_data=from_data
)
)
# ### Add/modify properties ###
# Loop over props/vals
for from_prop, from_val in from_data.items():
# #### Handle compound vals recursively ####
if isinstance(from_val, dict) or BaseFigure._is_dict_list(from_val):
# ##### Init property value if needed #####
if from_prop not in to_data:
to_data[from_prop] = {} if isinstance(from_val, dict) else []
# ##### Transform property val recursively #####
input_val = to_data[from_prop]
relayout_data.update(
BaseFigureWidget._transform_data(
input_val,
from_val,
should_remove=should_remove,
relayout_path=relayout_path + (from_prop,),
)
)
# #### Handle simple vals directly ####
else:
if from_prop not in to_data or not BasePlotlyType._vals_equal(
to_data[from_prop], from_val
):
to_data[from_prop] = from_val
relayout_path_prop = relayout_path + (from_prop,)
relayout_data[relayout_path_prop] = from_val
# ### Remove properties ###
if should_remove:
for remove_prop in set(to_data.keys()).difference(
set(from_data.keys())
):
to_data.pop(remove_prop)
# Handle list
# -----------
elif isinstance(to_data, list):
# ### Validate from_data ###
if not isinstance(from_data, list):
raise ValueError(
"Mismatched data types: to_data: {to_data} {from_data}".format(
to_data=to_data, from_data=from_data
)
)
# ### Add/modify properties ###
# Loop over indexes / elements
for i, from_val in enumerate(from_data):
# #### Initialize element if needed ####
if i >= len(to_data):
to_data.append(None)
input_val = to_data[i]
# #### Handle compound element recursively ####
if input_val is not None and (
isinstance(from_val, dict) or BaseFigure._is_dict_list(from_val)
):
relayout_data.update(
BaseFigureWidget._transform_data(
input_val,
from_val,
should_remove=should_remove,
relayout_path=relayout_path + (i,),
)
)
# #### Handle simple elements directly ####
else:
if not BasePlotlyType._vals_equal(to_data[i], from_val):
to_data[i] = from_val
relayout_data[relayout_path + (i,)] = from_val
return relayout_data
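# --- Usage sketch (illustrative; not part of the original module) ---
# A brief, hedged example of how the machinery above is typically consumed
# through the code-generated plotly.graph_objs.FigureWidget subclass in a
# notebook. The callback signature (trace, points, state) mirrors the
# Points / InputDeviceState objects dispatched by
# _handler_js2py_pointsCallback.
#
#   import plotly.graph_objs as go
#   fig = go.FigureWidget(data=[go.Scatter(x=[1, 2, 3], y=[4, 2, 5])])
#   def handle_click(trace, points, state):
#       # points.point_inds holds the indexes of the clicked points
#       print(points.point_inds)
#   fig.data[0].on_click(handle_click)
#   fig.on_edits_completed(lambda: None)  # runs once pending edits finish
#   fig  # displaying the widget renders the interactive figure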
| mit | 7,837,312,327,912,698,000 | 33.796559 | 88 | 0.536345 | false |
Wopple/Mage | incint.py | 1 | 2535 | # Copyright 2009 Christopher Czyzewski
# This file is part of Project Mage.
#
# Project Mage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Project Mage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Project Mage. If not, see <http://www.gnu.org/licenses/>.
import sys
class IncInt:
def __init__(self, in_value, in_min, in_max, bob=False):
self.value = in_value
self.minimum = in_min
self.maximum = in_max
self.doesBob = bob
self.bobUp = True
if self.minimum > self.maximum:
print "Error in IncInt - Maximum value greater or equal to minimum value"
print str(self.value) + " is not between " + str(self.minimum) + " and " + str(self.maximum)
sys.exit()
if (self.value < self.minimum) or (self.value > self.maximum):
print "Error in IncInt - Value not in range"
sys.exit()
if self.doesBob != True and self.doesBob != False:
print "Error in IncInt - Bobbing variable not set to boolean"
sys.exit()
def increment(self):
self.value += 1
if self.value > self.maximum:
self.value = self.minimum
def decrement(self):
self.value -= 1
if self.value < self.minimum:
self.value = self.maximum
def isMin(self):
if self.value == self.minimum:
return True
else:
return False
def isMax(self):
if self.value == self.maximum:
return True
else:
return False
def isBound(self):
return ( (self.isMin()) or (self.isMax()) )
def inc(self):
self.increment()
def dec(self):
self.decrement()
def bob(self):
if self.doesBob:
if self.bobUp:
self.inc()
if self.value >= self.maximum:
self.bobUp = False
else:
self.dec()
if self.value <= self.minimum:
self.bobUp = True
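# --- Usage sketch (illustrative; not part of the original file) ---
# Example of the wrap-around and bobbing behaviour implemented above.
#
#   counter = IncInt(3, 0, 5)
#   counter.inc()   # value becomes 4
#   counter.inc()   # value becomes 5
#   counter.inc()   # wraps around to the minimum, 0
#
#   bobber = IncInt(0, 0, 2, True)
#   bobber.bob()    # value becomes 1 (rising)
#   bobber.bob()    # value becomes 2; top reached, direction flips
#   bobber.bob()    # value becomes 1 (now falling)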
| gpl-3.0 | 7,574,381,157,889,218,000 | 30.296296 | 104 | 0.577515 | false |
jota-info/Codigos_Exercicios | Python/TDD_PYTHON/test_fixo.py | 1 | 2802 | # coding: utf-8
import unittest
from numeros_telefonicos import TelefonesFixos
class TestTelefonesFixos(unittest.TestCase):
def test_verificar_quantidade_digitos_fornecidos(self):
        # There must be at most 10 digits, considering DDD + landline type
        # + number, e.g.: xx [2-5] xxxxxxx
        # Testing valid values
corretos = ['5534441112', '0934445577', '3829921313']
for elemento in corretos:
cl = TelefonesFixos(elemento)
self.assertTrue(cl._verificar_tamanho())
        # Testing invalid values
        # Too short; too long; containing characters
incorretos = ['123', '1234567890111213', 'dasd321FDSF21as']
for elemento in incorretos:
cl = TelefonesFixos(elemento)
self.assertFalse(cl._verificar_tamanho())
def test_verificar_ddd(self):
        # Any two digits are valid, as long as they do not start with 0
        # Characters are assumed to be excluded by the check above
        # Testing valid values
corretos = ['5534441641', '4734445544', '1134440091']
for elemento in corretos:
cl = TelefonesFixos(elemento)
self.assertTrue(cl._verificar_ddd())
        # Testing invalid values
        # The only failure case is a DDD starting with 0
cl = TelefonesFixos('0734441515')
self.assertFalse(cl._verificar_ddd())
def test_validar_como_fixo(self):
        # Checks whether the number type digit is in the [2-5] range
        # Phone numbers within ranges 2, 3, 4 and 5
corretos = ['4723995530', '1134496567', '8544448774', '8554537777']
for elemento in corretos:
cl = TelefonesFixos(elemento)
self.assertTrue(cl._validar_como_fixo())
        # Phone numbers outside ranges 2, 3, 4 and 5
incorretos = ['1113995530', '1464496567', '4574448774', '4884537777']
for elemento in incorretos:
cl = TelefonesFixos(elemento)
self.assertFalse(cl._validar_como_fixo())
def test_ddd(self):
        # Checks that it returns the DDD of the value passed to the class instance
cl = TelefonesFixos('4734441515')
self.assertEqual('47', cl.ddd())
def test_get_numero(self):
        # Checks that it returns the number portion of the value passed to the class instance
cl = TelefonesFixos('4734441515')
self.assertEqual('34441515', cl.get_numero())
def test_validar(self):
        # Checks that the validar method works as a way of calling
        # the private _validar_como_fixo method
cl = TelefonesFixos('4734441515')
self.assertTrue(cl.validar())
incorretos = ['314441641', '31dasjid']
for elemento in incorretos:
cl = TelefonesFixos(elemento)
self.assertFalse(cl.validar())
if __name__ == '__main__':
unittest.main()
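# --- Implementation sketch (illustrative; not part of this test file) ---
# The module under test (numeros_telefonicos) is not shown here. Inferred
# from the assertions above, a minimal TelefonesFixos could look roughly
# like the sketch below; anything beyond what the tests exercise is an
# assumption.
#
#   class TelefonesFixos(object):
#       def __init__(self, numero):
#           self.numero = numero
#       def _verificar_tamanho(self):
#           return len(self.numero) == 10 and self.numero.isdigit()
#       def _verificar_ddd(self):
#           return not self.numero.startswith('0')
#       def _validar_como_fixo(self):
#           return self.numero[2] in '2345'
#       def ddd(self):
#           return self.numero[:2]
#       def get_numero(self):
#           return self.numero[2:]
#       def validar(self):
#           return (self._verificar_tamanho() and self._verificar_ddd()
#                   and self._validar_como_fixo())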
| gpl-3.0 | 2,148,472,401,898,124,500 | 34.679487 | 77 | 0.630255 | false |
feedzilla/feedzilla | feedzilla/migrations/0004_auto__chg_field_feed_feed_url__chg_field_feed_site_url__add_index_feed.py | 1 | 4987 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Feed.feed_url'
db.alter_column(u'feedzilla_feed', 'feed_url', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255))
# Changing field 'Feed.site_url'
db.alter_column(u'feedzilla_feed', 'site_url', self.gf('django.db.models.fields.CharField')(max_length=255))
# Adding index on 'Feed', fields ['active']
db.create_index(u'feedzilla_feed', ['active'])
def backwards(self, orm):
# Removing index on 'Feed', fields ['active']
db.delete_index(u'feedzilla_feed', ['active'])
# Changing field 'Feed.feed_url'
db.alter_column(u'feedzilla_feed', 'feed_url', self.gf('django.db.models.fields.URLField')(max_length=200, unique=True))
# Changing field 'Feed.site_url'
db.alter_column(u'feedzilla_feed', 'site_url', self.gf('django.db.models.fields.URLField')(max_length=200))
models = {
u'feedzilla.feed': {
'Meta': {'object_name': 'Feed'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'active_post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'etag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'feed_url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_checked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'site_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'skip_filters': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'feedzilla.filtertag': {
'Meta': {'object_name': 'FilterTag'},
'exact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'feedzilla.filterword': {
'Meta': {'object_name': 'FilterWord'},
'exact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'feedzilla.post': {
'Meta': {'ordering': "['-created']", 'object_name': 'Post'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': u"orm['feedzilla.Feed']"}),
'guid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.TextField', [], {}),
'summary': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'feedzilla.request': {
'Meta': {'ordering': "['-created']", 'object_name': 'Request'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'url': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
}
}
complete_apps = ['feedzilla']
| bsd-3-clause | 3,697,462,035,953,831,400 | 58.369048 | 132 | 0.553639 | false |
amacd31/hydromet_graph | setup.py | 1 | 1471 | import os
from io import open
import versioneer
from setuptools import setup
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = ''.join([
line for line in f.readlines() if 'travis-ci' not in line
])
setup(
name='hydromet_graph',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='Graphing library for hydrological and meteorological variables.',
long_description=long_description,
author='Andrew MacDonald',
author_email='[email protected]',
license='BSD',
url='https://github.com/amacd31/hydromet_graph',
install_requires=['numpy', 'pandas'],
packages = ['hydromet_graph'],
test_suite = 'tests',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
| bsd-3-clause | -5,858,002,106,514,230,000 | 33.209302 | 82 | 0.626785 | false |
hbprotoss/weii | app/theme_manager.py | 1 | 1546 | #coding=utf-8
import os
from app import constant
from app import misc
INFO = 'Info'
SKIN = 'Skin'
ICON = 'Icon'
THEME_CONFIG = 'conf.ini'
class Theme():
params = {}
path = ''
# Internal
def __loadTheme( theme_name = 'default' ):
'''
    @param theme_name: The name of the theme
@return: widget.theme.Theme object
'''
THEME_ROOT = os.path.join( constant.APP_ROOT, 'theme', theme_name )
ICON_ROOT = os.path.join( THEME_ROOT, 'icon' )
conf = misc.ConfParser()
conf.read( os.path.join( THEME_ROOT, THEME_CONFIG ) )
theme = Theme()
theme.params[INFO] = dict( conf.items( INFO ) )
theme.params[SKIN] = dict( conf.items( SKIN ) )
theme.params[SKIN]['background-image'] = os.path.join( THEME_ROOT, theme.params[SKIN]['background-image'] )
theme.params[SKIN]['loading-image'] = os.path.join( THEME_ROOT, theme.params[SKIN]['loading-image'] )
theme.params[SKIN]['zoom-in-cursor'] = os.path.join(THEME_ROOT, theme.params[SKIN]['zoom-in-cursor'])
theme.params[SKIN]['upload-pic'] = os.path.join(THEME_ROOT, theme.params[SKIN]['upload-pic'])
theme.params[ICON] = {k:os.path.join( ICON_ROOT, v ) for k, v in conf.items( ICON )}
theme.path = THEME_ROOT
return theme
__g_theme = __loadTheme()
# Exports
def setCurrentTheme(theme_name):
global __g_theme
__g_theme = __loadTheme(theme_name)
def getCurrentTheme():
return __g_theme
def getParameter(section, key):
return __g_theme.params[section][key]
def getPath():
return __g_theme.path
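# Typical use from the rest of the application (using the shipped default
# theme as an example):
#   theme_manager.setCurrentTheme('default')
#   background = theme_manager.getParameter(SKIN, 'background-image')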
| mit | 3,204,480,994,669,589,500 | 26.140351 | 111 | 0.639715 | false |
0rmi/tyggbot | models/emote.py | 1 | 4022 | import logging
from collections import UserDict
import pymysql
import re
log = logging.getLogger('tyggbot')
class Emote:
def __init__(self):
self.id = -1 # An ID of -1 means the emote will be inserted on sync
self.emote_id = None
self.code = None # This value will be inserted when the update_emotes script is called, if necessary.
self.tm = 0
self.tm_record = 0
self.count = 0
self.needs_sync = False
self.regex = None
@classmethod
def load(cls, cursor, emote_id):
emote = cls()
emote.emote_id = emote_id
emote.regex = None
emote.needs_sync = True
return emote
@classmethod
def load_from_row(cls, row):
emote = cls()
emote.id = row['id']
emote.emote_id = row['emote_id']
emote.code = row['code']
if not emote.emote_id:
emote.regex = re.compile('(?<![^ ]){0}(?![^ ])'.format(re.escape(emote.code)))
emote.count = row['count']
emote.tm_record = row['tm_record']
return emote
def add(self, count, reactor):
self.count += count
self.tm += count
self.needs_sync = True
if self.tm > self.tm_record:
self.tm_record = self.tm
reactor.execute_delayed(60, self.reduce, (count, ))
def reduce(self, count):
self.tm -= count
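    # Note on add()/reduce() above: add() bumps both the lifetime counter and
    # the rolling counter `tm`, then schedules reduce() 60 seconds later, so
    # `tm` approximates uses within the last minute while tm_record keeps the
    # peak value seen so far.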
def sync(self, cursor):
if self.id == -1:
cursor.execute('INSERT INTO `tb_emote` (`emote_id`, `code`, `tm_record`, `count`) VALUES (%s, %s, %s, %s)',
(self.emote_id, self.code, self.tm_record, self.count))
self.id = cursor.lastrowid
else:
cursor.execute('UPDATE `tb_emote` SET `tm_record`=%s, `count`=%s WHERE `id`=%s',
(self.tm_record, self.count, self.id))
class EmoteManager(UserDict):
def __init__(self, sqlconn):
UserDict.__init__(self)
self.sqlconn = sqlconn
self.custom_data = []
def get_cursor(self):
self.sqlconn.ping()
return self.sqlconn.cursor(pymysql.cursors.DictCursor)
def get_normal_cursor(self):
self.sqlconn.ping()
return self.sqlconn.cursor()
def sync(self):
self.sqlconn.autocommit(False)
cursor = self.get_normal_cursor()
for emote in [emote for k, emote in self.data.items() if emote.needs_sync]:
emote.sync(cursor)
cursor.close()
self.sqlconn.autocommit(True)
def load(self):
self.data = {}
self.custom_data = []
cursor = self.get_cursor()
cursor.execute('SELECT * FROM `tb_emote`')
for row in cursor:
emote = Emote.load_from_row(row)
self.add_to_data(emote)
cursor.close()
def add_to_data(self, emote):
if emote.emote_id:
self.data[emote.emote_id] = emote
if emote.code:
self.data[emote.code] = emote
else:
self.custom_data.append(emote)
if emote.code:
self.data['custom_' + emote.code] = emote
def __getitem__(self, key):
if key not in self.data:
try:
# We can only dynamically add emotes that are ID-based
value = int(key)
except ValueError:
return None
log.info('Adding new emote with ID {0}'.format(value))
emote = Emote.load(self.get_cursor(), value)
self.add_to_data(emote)
return self.data[key]
def find(self, key):
try:
emote_id = int(key)
except ValueError:
emote_id = None
if emote_id:
return self.data[emote_id]
else:
key = str(key)
if key in self.data:
return self.data[key]
else:
for emote in self.custom_data:
if emote.code == key:
return emote
return None
| mit | 5,339,142,539,287,830,000 | 27.524823 | 119 | 0.533814 | false |
anaderi/lhcb_trigger_ml | assets/paperdraft/graphs/plottheilvssde.py | 1 | 1916 | import os,sys
import ROOT
from ROOT import *
from math import *
from array import *
filestoplot = { 'mse peak' : {'filename' : 'mse1.txt', 'colour' : 1, 'marker' : 20},
'mse pit' : {'filename' : 'mse2.txt', 'colour' : 2, 'marker' : 21},
'theil peak' : {'filename' : 'theil1.txt', 'colour' : 3, 'marker' : 22},
'theil pit' : {'filename' : 'theil2.txt', 'colour' : 4, 'marker' : 23},}
for entry in filestoplot :
thisfile = open(filestoplot[entry]['filename'],'r')
filestoplot[entry]['x'] = array('f',[0])
filestoplot[entry]['y'] = array('f',[0])
num = 0
for line in thisfile :
filestoplot[entry]['x'].append(float(line.split()[0]))
filestoplot[entry]['y'].append(float(line.split()[1].strip('\n')))
num += 1
filestoplot[entry]['graph'] = TGraph(num,filestoplot[entry]['x'],filestoplot[entry]['y'])
filestoplot[entry]['graph'].SetLineColor(filestoplot[entry]['colour'])
filestoplot[entry]['graph'].SetMarkerColor(filestoplot[entry]['colour'])
filestoplot[entry]['graph'].SetMarkerStyle(filestoplot[entry]['marker'])
filestoplot[entry]['graph'].GetXaxis().SetTitle('#alpha')
filestoplot[entry]['graph'].GetXaxis().SetNdivisions(510)
filestoplot[entry]['graph'].GetYaxis().SetTitle('uniformity')
thisfile.close()
print filestoplot
c1 = TCanvas("c1","c1",1000,800)
c1.cd()
i=1
for entry in filestoplot :
if i==1 :
filestoplot[entry]['graph'].Draw()
filestoplot[entry]['graph'].GetYaxis().SetRangeUser(0,2)
else :
filestoplot[entry]['graph'].Draw('PL')
i += 1
c1.GetPad(0).SetLogy()
leg = TLegend(0.6,0.2,0.9,0.5)
leg.SetFillStyle(0)
leg.SetFillColor(0)
leg.SetMargin(0.35)
leg.SetTextSize(0.04)
for entry in filestoplot :
leg.AddEntry(filestoplot[entry]['graph'],entry,'p')
leg.Draw("SAME")
c1.SaveAs('TheilVsMSE.pdf')
c1.SaveAs('TheilVsMSE.eps')
c1.SaveAs('TheilVsMSE.png')
c1.SaveAs('TheilVsMSE.root')
| mit | 294,535,963,766,876,400 | 32.034483 | 91 | 0.648747 | false |
sdsingh/e-mission-server | CFC_WebApp/main/featurecalc.py | 1 | 15622 | from __future__ import division
import math
import logging
import numpy as np
from pymongo import MongoClient
import utm
from sklearn.cluster import DBSCAN
from get_database import get_routeCluster_db,get_transit_db
from uuid import UUID
from route_matching import getRoute,fullMatchDistance,matchTransitRoutes,matchTransitStops
from common import get_mode_share_by_count
Sections = MongoClient('localhost').Stage_database.Stage_Sections
Modes=MongoClient('localhost').Stage_database.Stage_Modes
# Returns True if any tracking point in lst is within radius (in m) of place
def Include_place(lst,place,radius):
# list of tracking points
count=0
for pnt in lst:
count=count+(1 if calDistance(pnt,place)<=radius else 0)
if count>0:
return True
else:
return False
def calDistance(point1, point2):
earthRadius = 6371000
dLat = math.radians(point1[0]-point2[0])
dLon = math.radians(point1[1]-point2[1])
lat1 = math.radians(point1[0])
lat2 = math.radians(point2[0])
a = (math.sin(dLat/2) ** 2) + ((math.sin(dLon/2) ** 2) * math.cos(lat1) * math.cos(lat2))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = earthRadius * c
return d
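# Illustrative sanity check for calDistance (values are not from this code
# base): two points one degree of latitude apart on the same meridian,
#   calDistance([37.0, -122.0], [38.0, -122.0])   # ~111195 m (about 111 km)
# which follows from the 6371000 m earth radius used above.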
# The speed is in m/s
def calSpeed(trackpoint1, trackpoint2):
from dateutil import parser
distanceDelta = calDistance(trackpoint1['track_location']['coordinates'],
trackpoint2['track_location']['coordinates'])
timeDelta = parser.parse(trackpoint2['time']) - parser.parse(trackpoint1['time'])
logging.debug("while calculating speed form %s -> %s, distanceDelta = %s, timeDelta = %s" %
(trackpoint1, trackpoint2, distanceDelta, timeDelta))
if timeDelta.total_seconds() != 0:
return distanceDelta / timeDelta.total_seconds()
else:
return None
# This formula is from:
# http://www.movable-type.co.uk/scripts/latlong.html
# It returns the heading between two points using the forward-azimuth formula, in degrees.
def calHeading(point1, point2):
phi1 = math.radians(point1[0])
phi2 = math.radians(point2[0])
lambda1 = math.radians(point1[1])
lambda2 = math.radians(point2[1])
y = math.sin(lambda2-lambda1) * math.cos(phi2)
x = math.cos(phi1)*math.sin(phi2) - \
math.sin(phi1)*math.cos(phi2)*math.cos(lambda2-lambda1)
brng = math.degrees(math.atan2(y, x))
return brng
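# Hand-checked orientation examples for calHeading (illustrative only): from
# (0, 0) a point due east gives calHeading([0, 0], [0, 1]) ~= 90.0 and a
# point due north gives calHeading([0, 0], [1, 0]) ~= 0.0; results are in
# degrees in the range (-180, 180].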
def calHC(point1, point2, point3):
HC = calHeading(point2, point3) - calHeading(point1, point2)
return HC
def calHCR(segment):
trackpoints = segment['track_points']
if len(trackpoints) < 3:
return 0
else:
HCNum = 0
for (i, point) in enumerate(trackpoints[:-2]):
currPoint = point
nextPoint = trackpoints[i+1]
nexNextPt = trackpoints[i+2]
HC = calHC(currPoint['track_location']['coordinates'], nextPoint['track_location']['coordinates'], \
nexNextPt['track_location']['coordinates'])
if HC >= 15:
HCNum += 1
segmentDist = segment['distance']
if segmentDist!= None and segmentDist != 0:
HCR = HCNum/segmentDist
return HCR
else:
return 0
def calSR(segment):
trackpoints = segment['track_points']
if len(trackpoints) < 2:
return 0
else:
stopNum = 0
for (i, point) in enumerate(trackpoints[:-1]):
currPoint = point
nextPoint = trackpoints[i+1]
currVelocity = calSpeed(currPoint, nextPoint)
if currVelocity != None and currVelocity <= 0.75:
stopNum += 1
segmentDist = segment['distance']
if segmentDist != None and segmentDist != 0:
return stopNum/segmentDist
else:
return 0
def calVCR(segment):
trackpoints = segment['track_points']
if len(trackpoints) < 3:
return 0
else:
Pv = 0
for (i, point) in enumerate(trackpoints[:-2]):
currPoint = point
nextPoint = trackpoints[i+1]
nexNextPt = trackpoints[i+2]
velocity1 = calSpeed(currPoint, nextPoint)
velocity2 = calSpeed(nextPoint, nexNextPt)
if velocity1 != None and velocity2 != None:
if velocity1 != 0:
VC = abs(velocity2 - velocity1)/velocity1
else:
VC = 0
else:
VC = 0
if VC > 0.7:
Pv += 1
segmentDist = segment['distance']
if segmentDist != None and segmentDist != 0:
return Pv/segmentDist
else:
return 0
def calSegmentDistance(segment):
return segment['distance']
def calSpeeds(segment):
trackpoints = segment['track_points']
if len(trackpoints) == 0:
return None
speeds = np.zeros(len(trackpoints) - 1)
for (i, point) in enumerate(trackpoints[:-1]):
currPoint = point
nextPoint = trackpoints[i+1]
currSpeed = calSpeed(currPoint, nextPoint)
if currSpeed != None:
speeds[i] = currSpeed
logging.debug("Returning vector of length %s while calculating speeds for trackpoints of length %s " % (speeds.shape, len(trackpoints)))
return speeds
def calAvgSpeed(segment):
timeDelta = segment['section_end_datetime'] - segment['section_start_datetime']
if timeDelta.total_seconds() != 0:
return segment['distance'] / timeDelta.total_seconds()
else:
return None
# In order to calculate the acceleration, we do the following.
# point0: (loc0, t0), point1: (loc1, t1), point2: (loc2, t2), point3: (loc3, t3)
# becomes
# speed0: ((loc1 - loc0) / (t1 - t0)), speed1: ((loc2 - loc1) / (t2-t1)),
# speed2: ((loc3 - loc2) / (t3 - t2)
# becomes
# segment0: speed0 / (t1 - t0), segment1: (speed1 - speed0)/(t2-t1),
# segment2: (speed2 - speed1) / (t3-t2)
def calAccels(segment):
from dateutil import parser
speeds = calSpeeds(segment)
trackpoints = segment['track_points']
    if speeds is None:
return None
accel = np.zeros(len(speeds) - 1)
prevSpeed = 0
for (i, speed) in enumerate(speeds[0:-1]):
currSpeed = speed # speed0
speedDelta = currSpeed - prevSpeed # (speed0 - 0)
# t1 - t0
timeDelta = parser.parse(trackpoints[i+1]['time']) - parser.parse(trackpoints[i]['time'])
logging.debug("while calculating accels from %s -> %s, speedDelta = %s, timeDelta = %s" %
(trackpoints[i+1], trackpoints[i], speedDelta, timeDelta))
if timeDelta.total_seconds() != 0:
accel[i] = speedDelta/(timeDelta.total_seconds())
# logging.debug("resulting acceleration is %s" % accel[i])
prevSpeed = currSpeed
return accel
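# Note on the shapes produced above (as the code is written): n trackpoints
# yield n-1 speeds and n-2 acceleration entries; accel[0] is speed0/(t1-t0)
# and accel[i] is (speed_i - speed_{i-1})/(t_{i+1} - t_i) for later entries.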
def getIthMaxSpeed(segment, i):
# python does not appear to have a built-in mechanism for returning the top
# ith max. We would need to write our own, possibly by sorting. Since it is
# not clear whether we ever actually need this (the paper does not explain
    # which i they used), only i == 1 is supported for now.
assert(i == 1)
speeds = calSpeeds(segment)
return np.amax(speeds)
def getIthMaxAccel(segment, i):
# python does not appear to have a built-in mechanism for returning the top
# ith max. We would need to write our own, possibly by sorting. Since it is
# not clear whether we ever actually need this (the paper does not explain
    # which i they used), only i == 1 is supported for now.
assert(i == 1)
accels = calAccels(segment)
return np.amax(accels)
def calSpeedDistParams(speeds):
return (np.mean(speeds), np.std(speeds))
# def user_tran_mat(user):
# user_sections=[]
# # print(tran_mat)
# query = {"$and": [{'type': 'move'},{'user_id':user},\
# {'$or': [{'confirmed_mode':1}, {'confirmed_mode':3},\
# {'confirmed_mode':5},{'confirmed_mode':6},{'confirmed_mode':7}]}]}
# # print(Sections.find(query).count())
# for section in Sections.find(query).sort("section_start_datetime",1):
# user_sections.append(section)
# if Sections.find(query).count()>=2:
# tran_mat=np.zeros([Modes.find().count(), Modes.find().count()])
# for i in range(len(user_sections)-1):
# if (user_sections[i+1]['section_start_datetime']-user_sections[i]['section_end_datetime']).seconds<=60:
# # print(user_sections[i+1]['section_start_datetime'],user_sections[i]['section_end_datetime'])
# fore_mode=user_sections[i]["confirmed_mode"]
# after_mode=user_sections[i+1]["confirmed_mode"]
# tran_mat[fore_mode-1,after_mode-1]+=1
# row_sums = tran_mat.sum(axis=1)
# new_mat = tran_mat / row_sums[:, np.newaxis]
# return new_mat
# else:
# return None
#
# # all model
# def all_tran_mat():
# tran_mat=np.zeros([Modes.find().count(), Modes.find().count()])
# for user in Sections.distinct("user_id"):
# user_sections=[]
# # print(tran_mat)
# query = {"$and": [{'type': 'move'},{'user_id':user},\
# {'$or': [{'confirmed_mode':1}, {'confirmed_mode':3},\
# {'confirmed_mode':5},{'confirmed_mode':6},{'confirmed_mode':7}]}]}
# # print(Sections.find(query).count())
# for section in Sections.find(query).sort("section_start_datetime",1):
# user_sections.append(section)
# if Sections.find(query).count()>=2:
# for i in range(len(user_sections)-1):
# if (user_sections[i+1]['section_start_datetime']-user_sections[i]['section_end_datetime']).seconds<=60:
# # print(user_sections[i+1]['section_start_datetime'],user_sections[i]['section_end_datetime'])
# fore_mode=user_sections[i]["confirmed_mode"]
# after_mode=user_sections[i+1]["confirmed_mode"]
# tran_mat[fore_mode-1,after_mode-1]+=1
# row_sums = tran_mat.sum(axis=1)
# new_mat = tran_mat / row_sums[:, np.newaxis]
# return new_mat
def mode_cluster(mode,eps,sam):
mode_change_pnts=[]
# print(tran_mat)
query = {"$and": [{'type': 'move'},\
{'confirmed_mode':mode}]}
# print(Sections.find(query).count())
for section in Sections.find(query).sort("section_start_datetime",1):
try:
mode_change_pnts.append(section['section_start_point']['coordinates'])
mode_change_pnts.append(section['section_end_point']['coordinates'])
except:
pass
# print(user_change_pnts)
# print(len(mode_change_pnts))
if len(mode_change_pnts)>=1:
# print(mode_change_pnts)
np_points=np.array(mode_change_pnts)
# print(np_points[:,0])
# fig, axes = plt.subplots(1, 1)
# axes.scatter(np_points[:,0], np_points[:,1])
# plt.show()
else:
pass
utm_x = []
utm_y = []
for row in mode_change_pnts:
utm_loc = utm.from_latlon(row[0],row[1])
utm_x = np.append(utm_x,utm_loc[0])
utm_y = np.append(utm_y,utm_loc[1])
utm_location = np.column_stack((utm_x,utm_y))
db = DBSCAN(eps=eps,min_samples=sam)
db_fit = db.fit(utm_location)
db_labels = db_fit.labels_
#print db_labels
new_db_labels = db_labels[db_labels!=-1]
new_location = np_points[db_labels!=-1]
# print len(new_db_labels)
# print len(new_location)
# print new_information
label_unique = np.unique(new_db_labels)
cluster_center = np.zeros((len(label_unique),2))
for label in label_unique:
sub_location = new_location[new_db_labels==label]
temp_center = np.mean(sub_location,axis=0)
cluster_center[int(label)] = temp_center
# print cluster_center
return cluster_center
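# Reading of mode_cluster above: start/end points of all sections confirmed
# as `mode` are projected to UTM before DBSCAN, so `eps` is effectively a
# distance in metres and `sam` is DBSCAN's min_samples; the returned array
# holds one cluster centre (in the original coordinate order) per label.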
#
# print(mode_cluster(6))
def mode_start_end_coverage(segment,cluster,eps):
mode_change_pnts=[]
# print(tran_mat)
num_sec=0
centers=cluster
# print(centers)
try:
if Include_place(centers,segment['section_start_point']['coordinates'],eps) and \
Include_place(centers,segment['section_end_point']['coordinates'],eps):
return 1
else:
return 0
except:
return 0
# print(mode_start_end_coverage(5,105,2))
# print(mode_start_end_coverage(6,600,2))
# This is currently only used in this file, so it is fine to use only really
# user confirmed modes. We don't want to learn on trips where we don't have
# ground truth.
def get_mode_share_by_count(lst):
# input here is a list of sections
displayModeList = getDisplayModes()
# logging.debug(displayModeList)
modeCountMap = {}
for mode in displayModeList:
modeCountMap[mode['mode_name']] = 0
for section in lst:
if section['confirmed_mode']==mode['mode_id']:
modeCountMap[mode['mode_name']] +=1
elif section['mode']==mode['mode_id']:
modeCountMap[mode['mode_name']] +=1
return modeCountMap
# This is currently only used in this file, so it is fine to use only really
# user confirmed modes. We don't want to learn on trips where we don't have
# ground truth.
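# NOTE: the definition below shadows both the `common` import at the top of
# the file and the section-based version above, because it is defined last.
# It also relies on get_section_db() and getAllModes(), which do not appear
# to be imported in this file as-is.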
def get_mode_share_by_count(list_idx):
Sections=get_section_db()
## takes a list of idx's
AllModeList = getAllModes()
MODE = {}
MODE2= {}
for mode in AllModeList:
MODE[mode['mode_id']]=0
for _id in list_idx:
section=Sections.find_one({'_id': _id})
mode_id = section['confirmed_mode']
try:
MODE[mode_id] += 1
except KeyError:
MODE[mode_id] = 1
# print(sum(MODE.values()))
if sum(MODE.values())==0:
for mode in AllModeList:
MODE2[mode['mode_id']]=0
# print(MODE2)
else:
for mode in AllModeList:
MODE2[mode['mode_id']]=MODE[mode['mode_id']]/sum(MODE.values())
return MODE2
def cluster_route_match_score(segment,step1=100000,step2=100000,method='lcs',radius1=2000,threshold=0.5):
userRouteClusters=get_routeCluster_db().find_one({'$and':[{'user':segment['user_id']},{'method':method}]})['clusters']
route_seg = getRoute(segment['_id'])
dis=999999
medoid_ids=userRouteClusters.keys()
if len(medoid_ids)!=0:
choice=medoid_ids[0]
for idx in userRouteClusters.keys():
route_idx=getRoute(idx)
try:
dis_new=fullMatchDistance(route_seg,route_idx,step1,step2,method,radius1)
except RuntimeError:
dis_new=999999
if dis_new<dis:
dis=dis_new
choice=idx
# print(dis)
# print(userRouteClusters[choice])
if dis<=threshold:
cluster=userRouteClusters[choice]
cluster.append(choice)
ModePerc=get_mode_share_by_count(cluster)
else:
ModePerc=get_mode_share_by_count([])
return ModePerc
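# The value returned above is a {mode_id: share} mapping; when no stored
# cluster medoid matches the segment's route within `threshold`, every share
# is zero (get_mode_share_by_count([]) yields an all-zero mapping).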
def transit_route_match_score(segment,step1=100000,step2=100000,method='lcs',radius1=2500,threshold=0.5):
Transits=get_transit_db()
transitMatch={}
route_seg=getRoute(segment['_id'])
for type in Transits.distinct('type'):
for entry in Transits.find({'type':type}):
transitMatch[type]=matchTransitRoutes(route_seg,entry['stops'],step1,step2,method,radius1,threshold)
if transitMatch[entry['type']]==1:
break
return transitMatch
def transit_stop_match_score(segment,radius1=300):
Transits=get_transit_db()
transitMatch={}
route_seg=getRoute(segment['_id'])
for type in Transits.distinct('type'):
for entry in Transits.find({'type':type}):
transitMatch[type]=matchTransitStops(route_seg,entry['stops'],radius1)
if transitMatch[entry['type']]==1:
break
return transitMatch
| bsd-3-clause | -4,801,782,847,972,583,000 | 35.24594 | 140 | 0.611701 | false |
AntagonistHQ/openprovider.py | openprovider/modules/ssl.py | 1 | 5835 | # coding=utf-8
from openprovider.modules import E, OE, common
from openprovider.models import SSLProduct, SSLOrder
def _domain_validation_methods(methods):
if not methods:
return None
items = [E.item(E.hostName(hostname), E.method(method)) for hostname, method in methods.items()]
return E.array(*items)
def _simple_array(hostnames):
items = [E.item(name) for name in hostnames]
return E.array(*items)
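# Rough sketch of what these helpers build (assuming E is the element factory
# used throughout these modules): _simple_array(['a.example', 'b.example'])
# yields <array><item>a.example</item><item>b.example</item></array>, and
# _domain_validation_methods({'a.example': 'dns'}) yields an <array> of
# <item><hostName>a.example</hostName><method>dns</method></item> entries.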
class SSLModule(common.Module):
"""Bindings to API methods in the SSL module."""
def search_product(self, limit=100, offset=0, with_price=0, with_supported_software=0,
with_description=0):
"""Search the list of available products."""
response = self.request(E.searchProductSslCertRequest(
E.limit(limit),
E.offset(offset),
E.withPrice(int(with_price)),
E.withSupportedSoftware(int(with_supported_software)),
E.withDescription(int(with_description)),
))
return response.as_models(SSLProduct)
def retrieve_product(self, product_id):
"""Retrieve details on a single product."""
response = self.request(E.retrieveProductSslCertRequest(
E.id(product_id)
))
return response.as_model(SSLProduct)
def search_order(self, limit=100, offset=0, common_name_pattern=None, status=None,
contact_handle=None):
"""Search all SSL certificate orders."""
response = self.request(E.searchOrderSslCertRequest(
E.limit(limit),
E.offset(offset),
OE('commonNamePattern', common_name_pattern),
OE('status', status, transform=_simple_array),
OE('contactHandle', contact_handle),
))
return response.as_models(SSLOrder)
def retrieve_order(self, order_id):
"""Retrieve details on a single order."""
response = self.request(E.retrieveOrderSslCertRequest(
E.id(order_id)
))
return response.as_model(SSLOrder)
def create(self, product_id, period, csr, software_id, organization_handle,
approver_email=None, signature_hash_algorithm=None, domain_validation_methods=None,
hostnames=None, technical_handle=None):
"""Order a new SSL certificate."""
response = self.request(E.createSslCertRequest(
E.productId(product_id),
E.period(period),
E.csr(csr),
E.softwareId(software_id),
E.organizationHandle(organization_handle),
OE('approverEmail', approver_email),
OE('signatureHashAlgorithm', signature_hash_algorithm),
OE('domainValidationMethods', domain_validation_methods, transform=_domain_validation_methods),
OE('hostNames', hostnames, transform=_simple_array),
OE('technicalHandle', technical_handle),
))
return int(response.data.id)
def renew(self, order_id):
response = self.request(E.renewSslCertRequest(
E.id(order_id),
))
return int(response.data.id)
def reissue(self, order_id, csr, software_id, organization_handle, approver_email=None,
signature_hash_algorithm=None, domain_validation_methods=None, hostnames=None,
technical_handle=None):
"""Reissue an SSL certificate order"""
response = self.request(E.reissueSslCertRequest(
E.id(order_id),
E.csr(csr),
E.softwareId(software_id),
E.organizationHandle(organization_handle),
OE('approverEmail', approver_email),
OE('signatureHashAlgorithm', signature_hash_algorithm),
OE('domainValidationMethods', domain_validation_methods, transform=_domain_validation_methods),
OE('hostNames', hostnames, transform=_simple_array),
OE('technicalHandle', technical_handle),
))
return int(response.data.id)
def modify(self, order_id, approver_email=None, domain_validation_methods=None):
"""Modify an ordered SSL certificate."""
response = self.request(E.modifySslCertRequest(
E.id(order_id),
OE('approverEmail', approver_email),
OE('domainValidationMethods', domain_validation_methods, transform=_domain_validation_methods),
))
return response.data
def cancel(self, order_id):
"""Cancel an ordered SSL certificate."""
response = self.request(E.cancelSslCertRequest(
E.id(order_id)
))
return int(response.data.id)
def retrieve_approver_email_list(self, domain, product_id):
"""Retrieve the list of allowed approver email addresses."""
response = self.request(E.retrieveApproverEmailListSslCertRequest(
E.domain(domain),
E.productId(product_id)
))
return [str(i) for i in response.data.array[0].item]
def resend_approver_email(self, order_id):
"""Resend the activation email to the approver."""
response = self.request(E.resendApproverEmailSslCertRequest(
E.id(order_id)
))
return int(response.data.id)
def change_approver_email_address(self, order_id, approver_email):
"""Change the approver email address for an ordered SSL certificate."""
response = self.request(
E.changeApproverEmailAddressSslCertRequest(
E.id(order_id),
E.approverEmail(approver_email)
)
)
return int(response.data.id)
def decode_csr(self, csr):
"""Decode a CSR and return its data."""
response = self.request(E.decodeCsrSslCertRequest(
E.csr(csr)
))
return response.data
| mit | -8,379,871,851,793,526,000 | 33.122807 | 107 | 0.61868 | false |
hickerson/bbn | fable/fable_sources/libtbx/easy_run.py | 1 | 14678 | from __future__ import division
import sys
# XXX how early a version can we get away with using the built-in module?
if (sys.version_info[1] >= 7) :
import subprocess
else :
try:
from libtbx import subprocess_with_fixes as subprocess
except ImportError:
try :
import subprocess_with_fixes as subprocess
except ImportError :
import subprocess
import sys, os
def _show_lines(lines, out, prefix):
if (out is None): out = sys.stdout
for line in lines:
print >> out, prefix+line
class fully_buffered_base(object):
def format_errors_if_any(self):
assert not self.join_stdout_stderr
if (len(self.stderr_lines) != 0):
msg = ["child process stderr output:"]
msg.append(" command: " + repr(self.command))
for line in self.stderr_lines:
msg.append(" " + line)
return "\n".join(msg)
return None
def raise_if_errors(self, Error=RuntimeError):
assert not self.join_stdout_stderr
msg = self.format_errors_if_any()
if (msg is not None):
raise Error(msg)
return self
def raise_if_output(self, show_output_threshold=10, Error=RuntimeError):
def start_msg():
result = ["unexpected child process output:"]
result.append(" command: " + repr(self.command))
return result
if (self.stdout_buffer is not None):
if (len(self.stdout_buffer) != 0):
msg = start_msg()
msg.append(" length of output: %d bytes" % len(self.stdout_buffer))
raise Error("\n".join(msg))
elif (len(self.stdout_lines) != 0):
msg = start_msg()
for line in self.stdout_lines[:show_output_threshold]:
msg.append(" " + line)
n = len(self.stdout_lines)
if (n > show_output_threshold):
if (n <= show_output_threshold+2):
for line in self.stdout_lines[show_output_threshold:n]:
msg.append(" " + line)
else:
msg.append(" ...")
msg.append(" remaining %d lines omitted."
% (n-show_output_threshold))
raise Error("\n".join(msg))
return self
def raise_if_errors_or_output(self, Error=RuntimeError):
self.raise_if_errors(Error=Error)
self.raise_if_output(Error=Error)
return self
def show_stderr(self, out=None, prefix=""):
_show_lines(lines=self.stderr_lines, out=out, prefix=prefix)
def show_stdout(self, out=None, prefix=""):
assert self.stdout_lines is not None
_show_lines(lines=self.stdout_lines, out=out, prefix=prefix)
class fully_buffered_simple(fully_buffered_base):
"""\
Executes command, sends stdin_lines (str or sequence), then reads
stdout_lines first, stderr_lines second (if join_stdout_stderr
is False).
The constructor may deadlock if the I/O buffers are too small to allow
the blocking write and reads in the given sequence. Specifically,
stdin_lines may be too big, or there may be too many stderr_lines,
but there can be any number of stdout_lines. The tests below are
known to work under Mac OS X, Windows XP, IRIX, and Tru64 Unix with
stdin_lines up to 1000000, stderr_lines up to 500. I.e. this simple
implementation should cover most practical situations.
"""
def __init__(self,
command,
stdin_lines=None,
join_stdout_stderr=False,
stdout_splitlines=True,
bufsize=-1):
self.command = command
self.join_stdout_stderr = join_stdout_stderr
if (join_stdout_stderr):
child_stdin, child_stdout = os.popen4(command, "t", bufsize)
child_stderr = None
else:
child_stdin, child_stdout, child_stderr = os.popen3(command,"t",bufsize)
if (stdin_lines is not None):
if (not isinstance(stdin_lines, str)):
stdin_lines = os.linesep.join(stdin_lines)
if (len(stdin_lines) != 0):
stdin_lines += os.linesep
child_stdin.write(stdin_lines)
child_stdin.close()
if (stdout_splitlines):
self.stdout_buffer = None
self.stdout_lines = child_stdout.read().splitlines()
else:
self.stdout_buffer = child_stdout.read()
self.stdout_lines = None
if (child_stderr is not None):
self.stderr_lines = child_stderr.read().splitlines()
else:
self.stderr_lines = []
child_stdout.close()
if (child_stderr is not None):
child_stderr.close()
self.return_code = None
class fully_buffered_subprocess(fully_buffered_base):
"This implementation is supposed to never block."
def __init__(self,
command,
stdin_lines=None,
join_stdout_stderr=False,
stdout_splitlines=True,
bufsize=-1):
self.command = command
self.join_stdout_stderr = join_stdout_stderr
if (not isinstance(command, str)):
command = subprocess.list2cmdline(command)
if (stdin_lines is not None):
if (not isinstance(stdin_lines, str)):
stdin_lines = os.linesep.join(stdin_lines)
if (len(stdin_lines) != 0):
stdin_lines += os.linesep
if (join_stdout_stderr):
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(
args=command,
shell=True,
bufsize=bufsize,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=stderr,
universal_newlines=True,
close_fds=not subprocess.mswindows)
o, e = p.communicate(input=stdin_lines)
if (stdout_splitlines):
self.stdout_buffer = None
self.stdout_lines = o.splitlines()
else:
self.stdout_buffer = o
self.stdout_lines = None
if (join_stdout_stderr):
self.stderr_lines = []
else:
self.stderr_lines = e.splitlines()
self.return_code = p.returncode
fully_buffered = fully_buffered_subprocess
def go(command, stdin_lines=None):
return fully_buffered(
command=command,
stdin_lines=stdin_lines,
join_stdout_stderr=True)
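# Minimal usage sketch (mirrors the exercise() tests further down):
#   result = fully_buffered(command="echo hello world").raise_if_errors()
#   assert result.stdout_lines == ["hello world"]
# go() behaves the same but folds stderr into stdout_lines via
# join_stdout_stderr.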
def call(command):
for s in [sys.stdout, sys.stderr]:
flush = getattr(s, "flush", None)
if (flush is not None): flush()
return subprocess.call(args=command, shell=True)
def exercise(args=None):
from cStringIO import StringIO
import sys
if (args is None): args = sys.argv[1:]
verbose = "--verbose" in args
#
if ("--simple" in args):
fb = fully_buffered_simple
else:
fb = fully_buffered
#
for command in ["echo hello world", ("echo", "hello", "world")]:
for result in [fb(command=command).raise_if_errors(),
fb(command=command, join_stdout_stderr=True),
go(command=command)]:
if (verbose): print result.stdout_lines
assert result.stdout_lines == ["hello world"]
#
if (os.path.isfile("/bin/ls")):
for command in ["/bin/ls /bin", ("/bin/ls", "/bin")]:
result = fb(command=command).raise_if_errors()
if (verbose): print result.stdout_lines
assert "ls" in result.stdout_lines
if (os.path.isfile("/usr/bin/wc")):
for command in ["/usr/bin/wc -l", ("/usr/bin/wc", "-l")]:
result = fb(command=command).raise_if_errors()
if (verbose): print result.stdout_lines
assert [s.strip() for s in result.stdout_lines] == ["0"]
result = fb(command=command, stdin_lines=["hello"]) \
.raise_if_errors()
if (verbose): print result.stdout_lines
assert [s.strip() for s in result.stdout_lines] == ["1"]
result = fb(command=command, stdin_lines=["hello", "world"]) \
.raise_if_errors()
if (verbose): print result.stdout_lines
assert [s.strip() for s in result.stdout_lines] == ["2"]
result = fb(command=command, stdin_lines="hello\nworld\nbye\n") \
.raise_if_errors()
if (verbose): print result.stdout_lines
assert [s.strip() for s in result.stdout_lines] == ["3"]
#
if (os.name == "nt"):
result = fb(command="dir /?").raise_if_errors()
if (verbose): print result.stdout_lines
assert len(result.stdout_lines) > 0
windir = os.environ.get("windir", None)
if (windir is not None and windir.find(" ") < 0):
result = fb(command="dir "+windir).raise_if_errors()
if (verbose): print result.stdout_lines
assert len(result.stdout_lines) > 0
#
pyexe = sys.executable
assert pyexe.count('"') == 0
pyexe = '"' + pyexe + '"'
if (os.name == "nt"):
pyexe = "call " + pyexe
#
if (os.environ.has_key("PYTHONPATH")):
if (not hasattr(os, "unsetenv")):
os.environ["PYTHONPATH"] = ""
else:
del os.environ["PYTHONPATH"]
if (os.name == "nt"):
result = fb(command="set").raise_if_errors()
elif (os.path.isfile("/usr/bin/printenv")):
result = fb(command="/usr/bin/printenv").raise_if_errors()
else:
result = None
if (result is not None):
if (verbose): print result.stdout_lines
for line in result.stdout_lines:
assert not line.startswith("PYTHONPATH") or line == "PYTHONPATH="
#
for stdout_splitlines in [True, False]:
result = fb(
command="%s -V" % pyexe,
stdout_splitlines=stdout_splitlines).raise_if_output()
if (verbose): print result.stderr_lines
assert result.stderr_lines == ["Python " + sys.version.split()[0]]
if (stdout_splitlines):
assert result.stdout_buffer is None
assert result.stdout_lines == []
else:
assert result.stdout_buffer == ""
assert result.stdout_lines is None
result = go(command="%s -V" % pyexe)
if (verbose): print result.stdout_lines
assert result.stdout_lines == ["Python " + sys.version.split()[0]]
result = fb(
command='%s -c "print 3+4"' % pyexe).raise_if_errors()
if (verbose): print result.stdout_lines
assert result.stdout_lines == ["7"]
  command = pyexe \
+ ' -c "import sys; print len(sys.stdin.read().splitlines())"'
result = fb(command=command).raise_if_errors()
if (verbose): print result.stdout_lines
assert result.stdout_lines == ["0"]
result = fb(command=command, stdin_lines=["hello"]) \
.raise_if_errors()
if (verbose): print result.stdout_lines
assert result.stdout_lines == ["1"]
result = fb(command=command, stdin_lines=["hello", "world"]) \
.raise_if_errors()
if (verbose): print result.stdout_lines
assert result.stdout_lines == ["2"]
result = fb(command=command, stdin_lines="hello\nworld\nbye\n") \
.raise_if_errors()
if (verbose): print result.stdout_lines
assert result.stdout_lines == ["3"]
if ("--quick" in args):
n_lines_o = 10000
else:
n_lines_o = 1000000
if (fb is fully_buffered_simple):
n_lines_e = 500 # Windows blocks if this value is greater than 701
else:
n_lines_e = 10000
result = fb(
command=command, stdin_lines=[str(i) for i in xrange(n_lines_o)]) \
.raise_if_errors()
if (verbose): print result.stdout_lines
assert result.stdout_lines == [str(n_lines_o)]
command = pyexe \
+ ' -c "import sys; sys.stderr.write(sys.stdin.read())"'
result = fb(command=command, stdin_lines="Hello\nWorld\nBye\n") \
.raise_if_output()
s = StringIO()
result.show_stderr(out=s, prefix="%(")
if (verbose): sys.stdout.write(s.getvalue())
assert s.getvalue() == """\
%(Hello
%(World
%(Bye
"""
cat_command = command = pyexe \
+ ' -c "import sys; sys.stdout.write(sys.stdin.read())"'
result = fb(command=command, stdin_lines="hello\nworld\nbye\n") \
.raise_if_errors()
s = StringIO()
result.show_stdout(out=s, prefix=">:")
if (verbose): sys.stdout.write(s.getvalue())
assert s.getvalue() == """\
>:hello
>:world
>:bye
"""
result = fb(
command=command, stdin_lines=[str(i) for i in xrange(n_lines_o)]) \
.raise_if_errors()
if (verbose): print result.stdout_lines[:5], result.stdout_lines[-5:]
assert len(result.stdout_lines) == n_lines_o
assert result.stdout_lines[:5] == ["0","1","2","3","4"]
assert result.stdout_lines[-5:] == [str(s)
for s in xrange(n_lines_o-5, n_lines_o)]
command = pyexe \
+ ' -c "import sys; sys.stderr.write(sys.stdin.read())"'
result = fb(
command=command, stdin_lines=[str(i) for i in xrange(n_lines_e,0,-1)])
assert len(result.stdout_lines) == 0
if (verbose): print result.stderr_lines[:5], result.stderr_lines[-5:]
assert len(result.stderr_lines) == n_lines_e
assert result.stderr_lines[:5] == [str(s)
for s in xrange(n_lines_e, n_lines_e-5, -1)]
assert result.stderr_lines[-5:] == ["5","4","3","2","1"]
command = pyexe + "; ".join((''' -c "\
import sys, os
lines = sys.stdin.read()
sys.stdout.write(lines)
sys.stdout.flush()
lines = lines.splitlines()[:%d]
lines.reverse()
nl = chr(%d)
sys.stderr.write(nl.join(lines)+nl)
sys.stderr.flush()"''' % (n_lines_e, ord("\n"))).splitlines())
result = fb(
command=command, stdin_lines=[str(i) for i in xrange(n_lines_o)])
if (verbose): print result.stdout_lines[:5], result.stdout_lines[-5:]
if (verbose): print result.stderr_lines[:5], result.stderr_lines[-5:]
assert len(result.stdout_lines) == n_lines_o
assert result.stdout_lines[:5] == ["0","1","2","3","4"]
assert result.stdout_lines[-5:] == [str(s)
for s in xrange(n_lines_o-5, n_lines_o)]
assert len(result.stderr_lines) == n_lines_e
assert result.stderr_lines[:5] == [str(s)
for s in xrange(n_lines_e-1, n_lines_e-6, -1)]
assert result.stderr_lines[-5:] == ["4","3","2","1","0"]
result = go(
command=command, stdin_lines=[str(i) for i in xrange(n_lines_o)])
if (verbose): print result.stdout_lines[:5], result.stdout_lines[-5:]
assert len(result.stdout_lines) == n_lines_o + n_lines_e
assert result.stdout_lines[:5] == ["0","1","2","3","4"]
assert result.stdout_lines[-5:] == ["4","3","2","1","0"]
#
try: fb(command="C68649356116218352").raise_if_errors()
except RuntimeError, e:
if (verbose): print e
assert str(e).startswith("child process stderr output:\n")
else: raise Exception_expected
#
for stdout_splitlines in [True, False]:
for n,b in [(10,20),(11,23),(12,26),(13,29)]:
try:
fb(
command=cat_command,
stdin_lines=[str(i) for i in xrange(n)],
stdout_splitlines=stdout_splitlines).raise_if_output()
except RuntimeError, e:
if (verbose): print e
assert str(e).startswith("unexpected child process output:\n")
if (stdout_splitlines):
if (n != 13):
assert str(e).endswith(str(n-1))
else:
assert str(e).endswith(" remaining 3 lines omitted.")
else:
assert str(e).endswith(" length of output: %d bytes" % b)
else: raise Exception_expected
#
fb(command=cat_command).raise_if_errors_or_output()
#
result = fb(command=["nslookup", "localhost"])
if (verbose):
print result.stdout_lines
print result.stderr_lines
#
while ("--forever" in args): pass
#
print "OK"
if (__name__ == "__main__"):
exercise()
| mit | -5,819,637,000,614,003,000 | 34.283654 | 78 | 0.630127 | false |
sirmmo/UF3 | uf3/urls.py | 1 | 1111 | from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
url(r'^$', 'biz.views.index', name='home'),
url(r'^map.geojson$', 'biz.views.geojson', name='map'),
url(r'^stats$', "biz.views.stats", name="stats"),
url(r'^stats.json$', "biz.views.complex", name="stats_json"),
url(r'^add$', "biz.views.add", name="add_venue"),
url(r'^edit/(?P<business_id>\d+)$', "biz.views.edit", name="edit_venue"),
url(r'^update/(?P<business_id>\d+)$', "biz.views.update", name="edit_venue"),
url(r'^data.csv$','biz.views.get_csv',name="csv"),
url(r'^login$','biz.views.login',name="login_page"),
url(r'^logout$','biz.views.logout',name="logout_page"),
url(r'^accounts/profile','biz.views.profile',name="profile_redirect"),
url(r'^users/(?P<username>\w+)$','biz.views.user',name="profile"),
url(r'^o/', include('oauth2_provider.urls', namespace='oauth2_provider')),
url('', include('social.apps.django_app.urls', namespace='social')),
url(r'^admin/', include(admin.site.urls)),
)
| mit | -148,059,056,320,776,740 | 40.148148 | 81 | 0.630963 | false |
charlyoleg/Cnc25D | cnc25d/export_2d.py | 1 | 8072 | # export_2d.py
# functions to help to generate 2D dxf and svg plan.
# created by charlyoleg on 2013/05/31
#
# (C) Copyright 2013 charlyoleg
#
# This file is part of the Cnc25D Python package.
#
# Cnc25D is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Cnc25D is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cnc25D. If not, see <http://www.gnu.org/licenses/>.
"""
export_2d.py provides functions to create DXF file from a FreeCAD Part Oject
"""
################################################################
# header for Python / FreeCAD compatibility
################################################################
import importing_freecad
importing_freecad.importing_freecad()
#print("FreeCAD.Version:", FreeCAD.Version())
#FreeCAD.Console.PrintMessage("Hello from PrintMessage!\n") # avoid using this method because it is not printed in the FreeCAD GUI
################################################################
# import
################################################################
import Part
from FreeCAD import Base
import importDXF
import Drawing
#import FreeCADGui
################################################################
# export_2d sub-functions
################################################################
def draw_rectangle(ai_position_x, ai_position_y, ai_size_x, ai_size_y):
p1 = Base.Vector(ai_position_x+0*ai_size_x, ai_position_y+0*ai_size_y, 0)
p2 = Base.Vector(ai_position_x+1*ai_size_x, ai_position_y+0*ai_size_y, 0)
p3 = Base.Vector(ai_position_x+1*ai_size_x, ai_position_y+1*ai_size_y, 0)
p4 = Base.Vector(ai_position_x+0*ai_size_x, ai_position_y+1*ai_size_y, 0)
r_rectangle_outline=[]
r_rectangle_outline.append(Part.makeLine(p1, p2))
r_rectangle_outline.append(Part.makeLine(p2, p3))
r_rectangle_outline.append(Part.makeLine(p3, p4))
r_rectangle_outline.append(Part.makeLine(p4, p1))
#r_rectangle = Part.Face(Part.Wire(r_rectangle_outline))
r_rectangle = r_rectangle_outline
return(r_rectangle)
def draw_gauge(ai_drawing_length, ai_drawing_height, ai_representation_max, ai_representation_value, ai_position_x, ai_position_y):
l_gauge_value = ai_drawing_length*ai_representation_value/float(ai_representation_max)
#print("dbg067: l_gauge_value:", l_gauge_value)
r_gauge = []
r_gauge.extend(draw_rectangle(ai_position_x-ai_drawing_height/2.0, ai_position_y, ai_drawing_length+ai_drawing_height, ai_drawing_height))
r_gauge.extend(draw_rectangle(ai_position_x, ai_position_y+ai_drawing_height/4.0, l_gauge_value, ai_drawing_height/2.0))
return(r_gauge)
################################################################
# export_2d API
################################################################
def export_to_dxf_abandoned(ai_solid, ai_vector, ai_depth, ai_output_file): # it works only the FreeCAD Gui
""" [Obsolete] create a DXF of a slice of FreeCAD Part Object
"""
l_slices = ai_solid.slice(ai_vector, ai_depth)
l_doc = App.newDocument("tmp_doc")
i=0
for l_shape in l_slices:
i += 1
l_obj = l_doc.addObject("Part::Feature","MyShape{:02d}".format(i))
#l_doc.MyShape.Shape = l_shape
#App.ActiveDocument.MyShape.Shape = l_shape
l_obj.Shape = l_shape
#l_doc.recompute()
l_objects = App.ActiveDocument.Objects
#l_objects = FreeCAD.ActiveDocument.Objects
# this work with the gui but not in pure python script
# Suspect root cause:
# /usr/lib/freecad/Mod/Draft/importDXF.py line:49
# it seems it doesn't detect the gui is off
importDXF.export(l_objects, ai_output_file)
return(1)
def export_to_dxf(ai_solid, ai_vector, ai_depth, ai_output_file):
""" create a DXF of a slice of FreeCAD Part Object
"""
  l_slice = Part.makeCompound(ai_solid.slice(ai_vector, ai_depth)) # slice the plank in the ai_vector plane at the height ai_depth
r_dxf = Drawing.projectToDXF(l_slice, ai_vector)
#r_dxf = Drawing.projectToDXF(ai_solid, ai_vector) # works also :)
fh_output = open(ai_output_file, 'w')
fh_output.write(r_dxf)
fh_output.close()
return(1)
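# Illustrative call (variable names are hypothetical): slice a solid 5 mm
# above its base along +Z and write the projected outline to plank.dxf:
#   export_to_dxf(my_solid, Base.Vector(0, 0, 1), 5, "plank.dxf")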
def export_to_svg(ai_solid, ai_vector, ai_depth, ai_output_file):
""" create a SVG of a slice of FreeCAD Part Object. The generated SVG is incomplete. SVG header must be added to it to be opened by Inkscape
"""
  l_slice = Part.makeCompound(ai_solid.slice(ai_vector, ai_depth)) # slice the plank in the ai_vector plane at the height ai_depth
r_dxf = Drawing.projectToSVG(l_slice, ai_vector) # it generates a snippet of svg not directly usable by Inkscape. It needs the svg head and document markers.
#r_dxf = Drawing.projectToSVG(ai_solid, ai_vector) # works also :)
fh_output = open(ai_output_file, 'w')
fh_output.write(r_dxf)
fh_output.close()
return(1)
def export_xyz_to_dxf(ai_solid, ai_size_x, ai_size_y, ai_size_z, ai_xy_slice_list, ai_xz_slice_list, ai_yz_slice_list, ai_output_file):
""" Cut a FreeCAD Part Object in many slices in the three directions X, Y and Z and put all those slices in a DXF file
"""
# calculate the space between two drawings
l_space = max(ai_size_x/5.0, ai_size_y/5.0, ai_size_z/5.0)
#
vec_z_unit = Base.Vector(0,0,1)
#
l_slice_list = []
l_pos_y = 0
for lo in ['xy','xz','yz']:
#l_solid = ai_solid
l_solid = ai_solid.copy()
l_depth_list = []
l_shift_x = 0
l_gauge_max = 0
if(lo=='xy'):
l_solid.rotate(Base.Vector(ai_size_x/2.0, ai_size_y/2.0, ai_size_z/2.0), Base.Vector(0,0,1), 0)
l_solid.translate(Base.Vector(0,0,0)) # place the module corner at origin (0,0,0)
l_solid.translate(Base.Vector(0,2*ai_size_z+7*l_space,0))
l_pos_y = 2*ai_size_z+6*l_space
l_depth_list = ai_xy_slice_list
l_shift_x = ai_size_x
l_gauge_max = ai_size_z
elif(lo=='xz'):
l_solid.rotate(Base.Vector(ai_size_x/2.0, ai_size_y/2.0, ai_size_z/2.0), Base.Vector(1,0,0), -90)
l_solid.translate(Base.Vector((ai_size_x-ai_size_x)/2.0, (ai_size_z-ai_size_y)/2.0, (ai_size_y-ai_size_z)/2.0)) # place the module corner at origin (0,0,0)
l_solid.translate(Base.Vector(0,1*ai_size_z+4*l_space,0))
l_pos_y = 1*ai_size_z+3*l_space
l_depth_list = ai_xz_slice_list
l_shift_x = ai_size_x
l_gauge_max = ai_size_y
elif(lo=='yz'):
l_solid.rotate(Base.Vector(ai_size_x/2.0, ai_size_y/2.0, ai_size_z/2.0), Base.Vector(0,0,1), -90)
l_solid.rotate(Base.Vector(ai_size_x/2.0, ai_size_y/2.0, ai_size_z/2.0), Base.Vector(1,0,0), -90)
l_solid.translate(Base.Vector((ai_size_y-ai_size_x)/2.0, (ai_size_z-ai_size_y)/2.0, (ai_size_x-ai_size_z)/2.0)) # place the module corner at origin (0,0,0)
l_solid.translate(Base.Vector(0,l_space,0))
l_pos_y = 0*ai_size_z+0*l_space
l_depth_list = ai_yz_slice_list
l_shift_x = ai_size_y
l_gauge_max = ai_size_x
l_pos_x = 0
for l_depth in l_depth_list:
#print("dbg163: l_shift_x l_space l_gauge_max l_depth l_pos_x l_pos_y", l_shift_x, l_space, l_gauge_max, l_depth, l_pos_x, l_pos_y)
l_slice_list.extend(draw_gauge(l_shift_x, l_space/2.0, l_gauge_max, l_depth, l_pos_x, l_pos_y))
l_pos_x += l_shift_x+2*l_space
ll_depth = l_depth
if(lo=='xz'):
ll_depth = ai_size_y-l_depth
#print("dbg168: ll_depth:", ll_depth)
l_slice_list.extend(l_solid.slice(vec_z_unit, ll_depth))
l_solid.translate(Base.Vector(l_shift_x+2*l_space,0,0))
l_slice = Part.makeCompound(l_slice_list)
# temporary commented because of OpenCascade bug
#r_dxf = Drawing.projectToDXF(l_slice, vec_z_unit)
##r_dxf = Drawing.projectToDXF(ai_solid, ai_vector)
#fh_output = open(ai_output_file, 'w')
#fh_output.write(r_dxf)
#fh_output.close()
return(1)
| gpl-3.0 | 1,595,689,253,068,316,200 | 43.596685 | 161 | 0.637141 | false |
matthiasdiener/spack | var/spack/repos/builtin/packages/openspeedshop-utils/package.py | 1 | 14855 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
##############################################################################
# Copyright (c) 2015-2018 Krell Institute. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import spack
import spack.store
import os
import os.path
class OpenspeedshopUtils(CMakePackage):
"""OpenSpeedShop is a community effort by The Krell Institute with
    current direct funding from DOE's NNSA. It builds on top of a
broad list of community infrastructures, most notably Dyninst
and MRNet from UW, libmonitor from Rice, and PAPI from UTK.
OpenSpeedShop is an open source multi platform Linux performance
tool which is targeted to support performance analysis of
applications running on both single node and large scale IA64,
IA32, EM64T, AMD64, PPC, ARM, Power8, Intel Phi, Blue Gene and
Cray platforms. OpenSpeedShop development is hosted by the Krell
Institute. The infrastructure and base components of OpenSpeedShop
are released as open source code primarily under LGPL.
openspeedshop-utils is a package that does not have the
qt3 gui. It was created to avoid a conflict between
openspeedshop and cbtf-argonavis-gui based on the fact
that spack will not allow a qt3 and qt4/qt5 dependency in a packages
dependency tree.
"""
homepage = "http://www.openspeedshop.org"
url = "https://github.com/OpenSpeedShop/openspeedshop.git"
# Use when the git repository is available
version('2.3.1.4', branch='2.3.1.4',
git='https://github.com/OpenSpeedShop/openspeedshop.git')
version('2.3.1.3', branch='2.3.1.3',
git='https://github.com/OpenSpeedShop/openspeedshop.git')
version('develop', branch='master',
git='https://github.com/OpenSpeedShop/openspeedshop.git')
variant('runtime', default=False,
description="build only the runtime libraries and collectors.")
variant('cti', default=False,
description="Build MRNet with the CTI startup option")
variant('crayfe', default=False,
description="build only the FE tool using the runtime_dir \
to point to target build.")
variant('cuda', default=False,
description="build with cuda packages included.")
variant('build_type', default='None', values=('None'),
description='CMake build type')
# MPI variants
variant('openmpi', default=False,
description="Build mpi collector for openmpi \
MPI when variant is enabled.")
variant('mpt', default=False,
description="Build mpi collector for SGI \
MPT MPI when variant is enabled.")
variant('mvapich2', default=False,
description="Build mpi collector for mvapich2\
MPI when variant is enabled.")
variant('mvapich', default=False,
description="Build mpi collector for mvapich\
MPI when variant is enabled.")
variant('mpich2', default=False,
description="Build mpi collector for mpich2\
MPI when variant is enabled.")
variant('mpich', default=False,
description="Build mpi collector for mpich\
MPI when variant is enabled.")
depends_on("[email protected]:", type='build')
# Dependencies for openspeedshop that are common to all
# the variants of the OpenSpeedShop build
depends_on("libtool", type='build')
depends_on("bison", type='build')
depends_on("[email protected]", type='build')
# For binutils
depends_on("binutils", when='@develop', type='build')
depends_on("[email protected]", when='@2.3.1.3:9999', type='build')
depends_on("elf", type="link")
depends_on("libdwarf")
depends_on("sqlite")
# For boost
depends_on("[email protected]:", when='@develop')
depends_on("[email protected]", when='@2.3.1.3:9999')
depends_on("dyninst@develop", when='@develop')
depends_on("[email protected]", when='@2.3.1.3:9999')
depends_on("python", when='@develop')
depends_on("[email protected]:2.7.15", when='@2.3.1.3:9999')
depends_on("libxml2")
# Dependencies for the openspeedshop cbtf packages.
depends_on("cbtf@develop", when='@develop')
depends_on("[email protected]:9999", when='@2.3.1.3:9999')
depends_on("cbtf-krell@develop", when='@develop')
depends_on("[email protected]:9999", when='@2.3.1.3:9999')
depends_on('cbtf-krell@develop+crayfe', when='@develop+crayfe')
depends_on('[email protected]:9999+crayfe', when='@2.3.1.3:9999+crayfe')
depends_on('cbtf-krell@develop+cti', when='@develop+cti')
depends_on('[email protected]:9999+cti', when='@2.3.1.3:9999+cti')
depends_on('cbtf-krell@develop+mpich', when='@develop+mpich')
depends_on('[email protected]:9999+mpich', when='@2.3.1.3:9999+mpich')
depends_on('cbtf-krell@develop+mpich2', when='@develop+mpich2')
depends_on('[email protected]:9999+mpich2', when='@2.3.1.3:9999+mpich2')
depends_on('cbtf-krell@develop+mpt', when='@develop+mpt')
depends_on('[email protected]:9999+mpt', when='@2.3.1.3:9999+mpt')
depends_on('cbtf-krell@develop+mvapich', when='@develop+mvapich')
depends_on('[email protected]:9999+mvapich', when='@2.3.1.3:9999+mvapich')
depends_on('cbtf-krell@develop+mvapich2', when='@develop+mvapich2')
depends_on('[email protected]:9999+mvapich2', when='@2.3.1.3:9999+mvapich2')
depends_on('cbtf-krell@develop+openmpi', when='@develop+openmpi')
depends_on('[email protected]:9999+openmpi', when='@2.3.1.3:9999+openmpi')
depends_on("cbtf-argonavis@develop", when='@develop+cuda')
depends_on("[email protected]:9999", when='@2.3.1.3:9999+cuda')
# For MRNet
depends_on("[email protected]:+cti", when='@develop+cti')
depends_on("[email protected]:+lwthreads", when='@develop')
depends_on("[email protected]:+cti", when='@2.3.1.3:9999+cti')
depends_on("[email protected]:+lwthreads", when='@2.3.1.3:9999')
parallel = False
build_directory = 'build_openspeedshop'
def set_CrayLoginNode_cmakeOptions(self, spec, cmakeOptions):
# Appends to cmakeOptions the options that will enable the appropriate
# Cray login node libraries
CrayLoginNodeOptions = []
rt_platform = "cray"
# How do we get the compute node (CNL) cbtf package install
# directory path?
# spec['cbtf'].prefix is the login node value for this build, as
# we only get here when building the login node components and
# that is all that is known to spack.
be_ck = spack.store.db.query_one('cbtf-krell arch=cray-CNL-haswell')
# Equivalent to install-tool cmake arg:
# '-DCBTF_KRELL_CN_RUNTIME_DIR=%s'
# % <base dir>/cbtf_v2.3.1.release/compute)
CrayLoginNodeOptions.append('-DCBTF_KRELL_CN_RUNTIME_DIR=%s'
% be_ck.prefix)
CrayLoginNodeOptions.append('-DRUNTIME_PLATFORM=%s'
% rt_platform)
cmakeOptions.extend(CrayLoginNodeOptions)
def cmake_args(self):
# Appends base options to cmake_args
spec = self.spec
compile_flags = "-O2 -g"
cmake_args = []
# Indicate building cbtf vers (transfer rawdata files)
instrumentor_setting = "cbtf"
if spec.satisfies('+runtime'):
self.set_defaultbase_cmakeOptions(spec, cmake_args)
cmake_args.extend(
['-DCMAKE_CXX_FLAGS=%s' % compile_flags,
'-DCMAKE_C_FLAGS=%s' % compile_flags,
'-DINSTRUMENTOR=%s' % instrumentor_setting,
'-DCBTF_DIR=%s' % spec['cbtf'].prefix,
'-DCBTF_KRELL_DIR=%s' % spec['cbtf-krell'].prefix,
'-DMRNET_DIR=%s' % spec['mrnet'].prefix])
else:
# Appends base options to cmake_args
self.set_defaultbase_cmakeOptions(spec, cmake_args)
cmake_args.extend(
['-DCMAKE_CXX_FLAGS=%s' % compile_flags,
'-DCMAKE_C_FLAGS=%s' % compile_flags,
'-DINSTRUMENTOR=%s' % instrumentor_setting,
'-DSQLITE3_DIR=%s' % spec['sqlite'].prefix,
'-DCBTF_DIR=%s' % spec['cbtf'].prefix,
'-DCBTF_KRELL_DIR=%s' % spec['cbtf-krell'].prefix,
'-DMRNET_DIR=%s' % spec['mrnet'].prefix])
if spec.satisfies('+crayfe'):
# We need to build target/compute node
# components/libraries first then pass
# those libraries to the openspeedshop
# login node build
self.set_CrayLoginNode_cmakeOptions(spec, cmake_args)
cmake_args.extend(['-DBUILD_QT3_GUI=FALSE'])
return cmake_args
def set_defaultbase_cmakeOptions(self, spec, cmakeOptions):
# Appends to cmakeOptions the options that will enable
# the appropriate base level options to the openspeedshop
# cmake build.
python_exe = spec['python'].command.path
python_library = spec['python'].libs[0]
python_include = spec['python'].headers.directories[0]
BaseOptions = []
BaseOptions.append('-DBINUTILS_DIR=%s' % spec['binutils'].prefix)
BaseOptions.append('-DLIBELF_DIR=%s' % spec['elf'].prefix)
BaseOptions.append('-DLIBDWARF_DIR=%s' % spec['libdwarf'].prefix)
BaseOptions.append('-DPYTHON_EXECUTABLE=%s' % python_exe)
BaseOptions.append('-DPYTHON_INCLUDE_DIR=%s' % python_include)
BaseOptions.append('-DPYTHON_LIBRARY=%s' % python_library)
BaseOptions.append('-DBoost_NO_SYSTEM_PATHS=TRUE')
BaseOptions.append('-DBoost_NO_BOOST_CMAKE=TRUE')
BaseOptions.append('-DBOOST_ROOT=%s' % spec['boost'].prefix)
BaseOptions.append('-DBoost_DIR=%s' % spec['boost'].prefix)
BaseOptions.append('-DBOOST_LIBRARYDIR=%s' % spec['boost'].prefix.lib)
BaseOptions.append('-DDYNINST_DIR=%s' % spec['dyninst'].prefix)
cmakeOptions.extend(BaseOptions)
def set_mpi_cmakeOptions(self, spec, cmakeOptions):
# Appends to cmakeOptions the options that will enable
# the appropriate MPI implementations
MPIOptions = []
# openmpi
if spec.satisfies('+openmpi'):
MPIOptions.append('-DOPENMPI_DIR=%s' % spec['openmpi'].prefix)
# mpich
if spec.satisfies('+mpich'):
MPIOptions.append('-DMPICH_DIR=%s' % spec['mpich'].prefix)
# mpich2
if spec.satisfies('+mpich2'):
MPIOptions.append('-DMPICH2_DIR=%s' % spec['mpich2'].prefix)
# mvapich
if spec.satisfies('+mvapich'):
MPIOptions.append('-DMVAPICH_DIR=%s' % spec['mvapich'].prefix)
# mvapich2
if spec.satisfies('+mvapich2'):
MPIOptions.append('-DMVAPICH2_DIR=%s' % spec['mvapich2'].prefix)
# mpt
if spec.satisfies('+mpt'):
MPIOptions.append('-DMPT_DIR=%s' % spec['mpt'].prefix)
cmakeOptions.extend(MPIOptions)
def setup_environment(self, spack_env, run_env):
"""Set up the compile and runtime environments for a package."""
# Find Dyninst library path, this is needed to
# set the DYNINSTAPI_RT_LIB library which is
# required for OpenSpeedShop to find loop level
# performance information
dyninst_libdir = find_libraries('libdyninstAPI_RT',
root=self.spec['dyninst'].prefix,
shared=True, recursive=True)
# Set Dyninst RT library path to support OSS loop resolution code
run_env.set('DYNINSTAPI_RT_LIB', dyninst_libdir)
# Find openspeedshop library path
oss_libdir = find_libraries(
'libopenss-framework',
root=self.spec['openspeedshop-utils'].prefix,
shared=True, recursive=True)
run_env.prepend_path('LD_LIBRARY_PATH',
os.path.dirname(oss_libdir.joined()))
run_env.set('OPENSS_RAWDATA_DIR', '.')
cbtf_mc = '/sbin/cbtf_mrnet_commnode'
cbtf_lmb = '/sbin/cbtf_libcbtf_mrnet_backend'
run_env.set('XPLAT_RSH', 'ssh')
run_env.set('MRNET_COMM_PATH',
join_path(self.spec['cbtf-krell'].prefix + cbtf_mc))
run_env.set('CBTF_MRNET_BACKEND_PATH',
join_path(self.spec['cbtf-krell'].prefix + cbtf_lmb))
run_env.prepend_path('PATH', self.spec['mrnet'].prefix.bin)
run_env.prepend_path('PATH', self.spec['cbtf-krell'].prefix.bin)
run_env.prepend_path('PATH', self.spec['cbtf-krell'].prefix.sbin)
run_env.prepend_path('PATH', self.spec['python'].prefix.bin)
| lgpl-2.1 | -6,906,376,639,101,847,000 | 41.809798 | 81 | 0.618243 | false |
khshim/lemontree | example/cifar10_dcgan.py | 1 | 9153 | import os
import time
import numpy as np
import theano
import theano.tensor as T
import scipy.misc
from theano.sandbox.rng_mrg import MRG_RandomStreams as MRG
from lemontree.data.cifar10 import CIFAR10
from lemontree.graphs.graph import SimpleGraph
from lemontree.generators.generator import SimpleGenerator
from lemontree.controls.history import SimpleHistory
from lemontree.graphs.graph import SimpleGraph
from lemontree.layers.activation import ReLU, Sigmoid, Tanh, Softmax
from lemontree.layers.dense import DenseLayer
from lemontree.layers.convolution import Convolution3DLayer, TransposedConvolution3DLayer
from lemontree.layers.pool import Upscaling3DLayer
from lemontree.layers.normalization import BatchNormalization3DLayer, BatchNormalization1DLayer
from lemontree.layers.dropout import DropoutLayer
from lemontree.layers.shape import ReshapeLayer, Flatten3DLayer
from lemontree.initializers import GlorotNormal, Normal
from lemontree.objectives import BinaryCrossentropy, BinaryAccuracy, CategoricalCrossentropy, CategoricalAccuracy
from lemontree.optimizers import Adam
from lemontree.parameters import SimpleParameter
from lemontree.utils.param_utils import filter_params_by_tags, print_tags_in_params, print_params_num
from lemontree.utils.type_utils import merge_dicts
from lemontree.utils.graph_utils import get_inputs_of_variables
from lemontree.utils.data_utils import split_data
np.random.seed(9999)
# base_datapath = 'C:/Users/skhu2/Dropbox/Project/data/'
# base_datapath = 'D:/Dropbox/Project/data/'
base_datapath = '/home/khshim/data/'
batch_size = 100
experiment_name = 'cifar10_dcgan'
#================Prepare Data================#
cifar10 = CIFAR10(base_datapath, 'tensor')
train_data = cifar10.train_data
train_data = train_data * 2 - 1
train_gen = SimpleGenerator([train_data], batch_size, 'train')
#================Build Graph================#
z = T.fmatrix('Z') # (batch_size, 100)
x = T.ftensor4('X') # (batch_size, 3, 28, 28)
# You can use either upscaling + conv / transposed conv
generator = SimpleGraph(experiment_name + '_gen', batch_size)
generator.add_layer(DenseLayer((100,), (8192,), use_bias=False), is_start=True)
generator.add_layer(BatchNormalization1DLayer((8192,), 0.9))
generator.add_layer(ReLU())
generator.add_layer(ReshapeLayer((8192,), (512, 4, 4)))
#generator.add_layer(Upscaling3DLayer((512,4,4), (512,8,8), (2,2)))
#generator.add_layer(Convolution3DLayer((512,8,8), (256,8,8), (3,3), 'half', use_bias=False))
generator.add_layer(TransposedConvolution3DLayer((512,4,4), (256,8,8), (3,3), 'half', (2,2), use_bias=False))
generator.add_layer(BatchNormalization3DLayer((256,8,8), 0.9))
generator.add_layer(ReLU())
#generator.add_layer(Upscaling3DLayer((256,8,8), (256,16,16), (2,2)))
#generator.add_layer(Convolution3DLayer((256,16,16), (128,16,16), (3,3), 'half', use_bias=False))
generator.add_layer(TransposedConvolution3DLayer((256,8,8), (128,16,16), (3,3), 'half', (2,2), use_bias=False))
generator.add_layer(BatchNormalization3DLayer((128,16,16), 0.9))
generator.add_layer(ReLU())
#generator.add_layer(Upscaling3DLayer((128,16,16), (128,32,32), (2,2)))
#generator.add_layer(Convolution3DLayer((128,32,32), (3,32,32), (3,3), 'half'))
generator.add_layer(TransposedConvolution3DLayer((128,16,16), (3,32,32), (3,3), 'half', (2,2)))
generator.add_layer(Tanh())
gen_params = generator.get_params()
gen_updates = generator.get_updates()
discriminator = SimpleGraph(experiment_name + '_disc', batch_size)
discriminator.add_layer(Convolution3DLayer((3,32,32), (128,16,16), (3,3), 'half', (2,2)), is_start=True)
discriminator.add_layer(ReLU(0.2))
discriminator.add_layer(Convolution3DLayer((128,16,16), (256,8,8), (3,3), 'half', (2,2), use_bias=False))
discriminator.add_layer(BatchNormalization3DLayer((256,8,8), 0.9))
discriminator.add_layer(ReLU(0.2))
discriminator.add_layer(Convolution3DLayer((256,8,8), (512,4,4), (3,3), 'half', (2,2), use_bias=False))
discriminator.add_layer(BatchNormalization3DLayer((512,4,4), 0.9))
discriminator.add_layer(ReLU(0.2))
discriminator.add_layer(Flatten3DLayer((512,4,4), (8192,)))
discriminator.add_layer(DenseLayer((8192,), (1,)))
discriminator.add_layer(Sigmoid())
disc_params = discriminator.get_params()
disc_updates = discriminator.get_updates()
#================Make Loss================#
zx, _ = generator.get_output({0:[z]}, -1, 0)
d_x, _ = discriminator.get_output({0:[x]}, -1, 0)
d_zx, _ = discriminator.get_output({0:[zx]}, -1, 0)
disc_loss_real = BinaryCrossentropy(True).get_output(d_x, 1)
disc_loss_fake = BinaryCrossentropy(True).get_output(d_zx, 0)
disc_loss = disc_loss_real + disc_loss_fake
gen_loss = BinaryCrossentropy(True).get_output(d_zx, 1)
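# In words (standard non-saturating GAN objective, matching the lines above):
#   disc_loss = BCE(D(x), 1) + BCE(D(G(z)), 0)   # real images scored as 1, generated as 0
#   gen_loss  = BCE(D(G(z)), 1)                  # generator is trained to make D score fakes as 1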
#================Initialize================#
# GlorotNormal().initialize_params(filter_params_by_tags(gen_params, ['weight']))
# GlorotNormal().initialize_params(filter_params_by_tags(disc_params, ['weight']))
Normal(0, 0.02).initialize_params(filter_params_by_tags(gen_params, ['weight']))
Normal(0, 0.02).initialize_params(filter_params_by_tags(disc_params, ['weight']))
hist = SimpleHistory(experiment_name + '_history/')
hist.add_keys(['disc_loss', 'gen_loss', 'disc_loss_fake', 'disc_loss_real'])
#================Make Optimizer================#
disc_opt = Adam(0.0002, 0.5, 0.9)
gen_opt = Adam(0.0002, 0.5, 0.9)
disc_opt_updates = disc_opt.get_updates(disc_loss, disc_params)
disc_opt_params = disc_opt.get_params()
gen_opt_updates = gen_opt.get_updates(gen_loss, gen_params)
gen_opt_params = gen_opt.get_params()
#total_params = disc_params + gen_params + disc_opt_params + gen_opt_params
#params_saver = SimpleParameter(total_params, experiment_name + '_params/')
#params_saver.save_params()
#================Compile Functions================#
disc_func = theano.function(inputs=[x,z],
outputs=[gen_loss, disc_loss, disc_loss_real, disc_loss_fake],
updates=disc_opt_updates,
allow_input_downcast=True)
gen_func = theano.function(inputs=[x,z],
outputs=[gen_loss, disc_loss, disc_loss_real, disc_loss_fake],
updates=gen_opt_updates,
allow_input_downcast=True)
get_image = theano.function(inputs=[z],
outputs=zx,
allow_input_downcast=True)
#================Convenient Functions================#
def pretrain():
dl = []
dlr = []
dlf = []
for index in range(train_gen.max_index):
data = train_gen.get_minibatch(index)[0]
latent = np.random.uniform(-1, 1, (batch_size, 100))
        func_outputs = disc_func(data, latent)
dl.append(func_outputs[1])
dlr.append(func_outputs[2])
dlf.append(func_outputs[3])
print('Pretrain disc_loss', np.mean(np.asarray(dl)))
print('Pretrain disc_loss_real', np.mean(np.asarray(dlr)))
print('Pretrain disc_loss_fake', np.mean(np.asarray(dlf)))
def train():
dl = []
gl = []
dlr = []
dlf = []
for index in range(0, train_gen.max_index-1, 2):
data = train_gen.get_minibatch(index)[0]
latent = np.random.uniform(-1, 1, (batch_size, 100))
func_outputs = gen_func(data, latent)
dl.append(func_outputs[1])
gl.append(func_outputs[0])
dlr.append(func_outputs[2])
dlf.append(func_outputs[3])
data = train_gen.get_minibatch(index+1)[0]
latent = np.random.uniform(-1, 1, (batch_size, 100))
func_outputs = disc_func(data, latent)
dl.append(func_outputs[1])
gl.append(func_outputs[0])
dlr.append(func_outputs[2])
dlf.append(func_outputs[3])
hist.history['disc_loss'].append(np.mean(np.asarray(dl)))
hist.history['gen_loss'].append(np.mean(np.asarray(gl)))
hist.history['disc_loss_real'].append(np.mean(np.asarray(dlr)))
hist.history['disc_loss_fake'].append(np.mean(np.asarray(dlf)))
result_folder = experiment_name + '_result/'
if not os.path.exists(result_folder):
os.makedirs(result_folder)
def generate(epoch):
latent = np.random.uniform(-1, 1, (batch_size, 100))
generated = (get_image(latent) + 1) / 2
manifold = np.zeros((32*8, 32*8, 3), dtype=theano.config.floatX)
for indx in range(8):
for indy in range(8):
current_img = np.swapaxes(generated[indx * 8 + indy], 0, 2)
current_img = np.swapaxes(current_img, 0, 1)
manifold[indx * 32:(indx+1) * 32, indy * 32:(indy+1) * 32, :] = current_img
manifold = np.asarray(manifold * 255, dtype='int32')
manifold = scipy.misc.toimage(manifold, cmin=0, cmax=255)
scipy.misc.imsave(result_folder + str(epoch) + '.png', manifold)
#================Train================#
# pretrain()
for epoch in range(200):
train_gen.shuffle()
print('... Epoch', epoch)
start_time = time.clock()
train()
if epoch % 2 == 0:
generate(epoch)
end_time = time.clock()
print('...... time:', end_time - start_time)
hist.print_history_of_epoch()
if epoch % 10 == 0:
hist.save_history_to_csv()
#================Test================# | mit | -5,900,204,018,850,788,000 | 38.973799 | 113 | 0.661313 | false |
DistrictDataLabs/logbook | catalog/management/commands/ingest.py | 1 | 1998 | # catalog.management.commands.ingest
# Ingest utility to grab data from a CSV file and insert into database.
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Sun Aug 23 21:18:54 2015 -0500
#
# Copyright (C) 2015 District Data Labs
# For license information, see LICENSE.txt
#
# ID: ingest.py [] [email protected] $
"""
Ingest utility to grab data from a CSV file and insert into database.
"""
##########################################################################
## Imports
##########################################################################
import argparse
from collections import Counter
from catalog.parser import ActivityParser
from django.core.management.base import BaseCommand, CommandError
##########################################################################
## Ingest Command
##########################################################################
class Command(BaseCommand):
help = "Ingest data from a CSV file and insert into the database."
def add_arguments(self, parser):
"""
Add command line arguments to the argparse.ArgumentParser.
"""
# Positional Arguments
parser.add_argument(
'data', metavar='PATH', nargs='+', type=argparse.FileType('r'),
help='Activity CSV file with user to action detail'
)
def handle(self, **options):
"""
Handle all command line input and write output to the console.
"""
counts = Counter()
parser = ActivityParser()
for idx, data in enumerate(options['data']):
try:
counts += parser.parse(data)
except Exception as e:
raise CommandError(str(e))
for key, cnt in sorted(counts.items(), key=lambda k: k[0]):
if key == 'rows': continue
self.stdout.write("{}: {}".format(key, cnt))
self.stdout.write("Read {} rows in {} data files.".format(counts['rows'], idx+1))
| apache-2.0 | 5,288,172,935,344,302,000 | 30.21875 | 89 | 0.537538 | false |
seishei/multiprocess | py3.3/multiprocess/util.py | 2 | 9911 | #
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
import sys
import functools
import os
import itertools
import weakref
import atexit
import threading        # we want threading to install its
                        # cleanup function before multiprocessing does
from subprocess import _args_from_interpreter_flags
from multiprocess.process import current_process, active_children
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
'SUBDEBUG', 'SUBWARNING',
]
#
# Logging
#
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
LOGGER_NAME = 'multiprocess'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
_logger = None
_log_to_stderr = False
def sub_debug(msg, *args):
if _logger:
_logger.log(SUBDEBUG, msg, *args)
def debug(msg, *args):
if _logger:
_logger.log(DEBUG, msg, *args)
def info(msg, *args):
if _logger:
_logger.log(INFO, msg, *args)
def sub_warning(msg, *args):
if _logger:
_logger.log(SUBWARNING, msg, *args)
def get_logger():
'''
Returns logger used by multiprocessing
'''
global _logger
import logging
logging._acquireLock()
try:
if not _logger:
_logger = logging.getLogger(LOGGER_NAME)
_logger.propagate = 0
logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
logging.addLevelName(SUBWARNING, 'SUBWARNING')
# XXX multiprocessing should cleanup before logging
if hasattr(atexit, 'unregister'):
atexit.unregister(_exit_function)
atexit.register(_exit_function)
else:
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
finally:
logging._releaseLock()
return _logger
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
global _log_to_stderr
import logging
logger = get_logger()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
if level:
logger.setLevel(level)
_log_to_stderr = True
return _logger
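# Typical usage sketch for the logging helpers defined above:
#   from multiprocess import util
#   util.log_to_stderr(util.SUBDEBUG)      # route multiprocess log records to stderr
#   util.debug('got lock for %s', 'resource')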
#
# Function returning a temp directory which will be removed on exit
#
def get_temp_dir():
# get name of a temp directory which will be automatically cleaned up
if current_process()._tempdir is None:
import shutil, tempfile
tempdir = tempfile.mkdtemp(prefix='pymp-')
info('created temp directory %s', tempdir)
Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
current_process()._tempdir = tempdir
return current_process()._tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
#
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
items = list(_afterfork_registry.items())
items.sort()
for (index, ident, func), obj in items:
try:
func(obj)
except Exception as e:
info('after forker raised exception %s', e)
def register_after_fork(obj, func):
_afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
#
# Finalization using weakrefs
#
_finalizer_registry = {}
_finalizer_counter = itertools.count()
class Finalize(object):
'''
Class which supports object finalization using weakrefs
'''
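    # Usage sketch (mirrors get_temp_dir() below): register a cleanup callback
    # that runs when `obj` is garbage collected or at interpreter shutdown:
    #   Finalize(obj, shutil.rmtree, args=[tempdir], exitpriority=-100)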
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
assert exitpriority is None or type(exitpriority) is int
if obj is not None:
self._weakref = weakref.ref(obj, self)
else:
assert exitpriority is not None
self._callback = callback
self._args = args
self._kwargs = kwargs or {}
self._key = (exitpriority, next(_finalizer_counter))
self._pid = os.getpid()
_finalizer_registry[self._key] = self
def __call__(self, wr=None,
# Need to bind these locally because the globals can have
# been cleared at shutdown
_finalizer_registry=_finalizer_registry,
sub_debug=sub_debug, getpid=os.getpid):
'''
Run the callback unless it has already been called or cancelled
'''
try:
del _finalizer_registry[self._key]
except KeyError:
sub_debug('finalizer no longer registered')
else:
if self._pid != getpid():
sub_debug('finalizer ignored because different process')
res = None
else:
sub_debug('finalizer calling %s with args %s and kwargs %s',
self._callback, self._args, self._kwargs)
res = self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return res
def cancel(self):
'''
Cancel finalization of the object
'''
try:
del _finalizer_registry[self._key]
except KeyError:
pass
else:
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
def still_active(self):
'''
Return whether this finalizer is still waiting to invoke callback
'''
return self._key in _finalizer_registry
def __repr__(self):
try:
obj = self._weakref()
except (AttributeError, TypeError):
obj = None
if obj is None:
return '<Finalize object, dead>'
x = '<Finalize object, callback=%s' % \
getattr(self._callback, '__name__', self._callback)
if self._args:
x += ', args=' + str(self._args)
if self._kwargs:
x += ', kwargs=' + str(self._kwargs)
if self._key[0] is not None:
            x += ', exitpriority=' + str(self._key[0])
return x + '>'
def _run_finalizers(minpriority=None):
'''
Run all finalizers whose exit priority is not None and at least minpriority
Finalizers with highest priority are called first; finalizers with
the same priority will be called in reverse order of creation.
'''
if _finalizer_registry is None:
# This function may be called after this module's globals are
# destroyed. See the _exit_function function in this module for more
# notes.
return
if minpriority is None:
f = lambda p : p[0][0] is not None
else:
f = lambda p : p[0][0] is not None and p[0][0] >= minpriority
items = [x for x in list(_finalizer_registry.items()) if f(x)]
items.sort(reverse=True)
for key, finalizer in items:
sub_debug('calling %s', finalizer)
try:
finalizer()
except Exception:
import traceback
traceback.print_exc()
if minpriority is None:
_finalizer_registry.clear()
#
# Clean up on exit
#
def is_exiting():
'''
Returns true if the process is shutting down
'''
return _exiting or _exiting is None
_exiting = False
def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
active_children=active_children,
current_process=current_process):
# We hold on to references to functions in the arglist due to the
# situation described below, where this function is called after this
# module's globals are destroyed.
global _exiting
if not _exiting:
_exiting = True
info('process shutting down')
debug('running all "atexit" finalizers with priority >= 0')
_run_finalizers(0)
if current_process() is not None:
# We check if the current process is None here because if
# it's None, any call to ``active_children()`` will raise
# an AttributeError (active_children winds up trying to
# get attributes from util._current_process). One
# situation where this can happen is if someone has
# manipulated sys.modules, causing this module to be
# garbage collected. The destructor for the module type
# then replaces all values in the module dict with None.
# For instance, after setuptools runs a test it replaces
# sys.modules with a copy created earlier. See issues
# #9775 and #15881. Also related: #4106, #9205, and
# #9207.
for p in active_children():
if p._daemonic:
info('calling terminate() for daemon %s', p.name)
p._popen.terminate()
for p in active_children():
info('calling join() for process %s', p.name)
p.join()
debug('running the remaining "atexit" finalizers')
_run_finalizers()
atexit.register(_exit_function)
#
# Some fork aware types
#
class ForkAwareThreadLock(object):
def __init__(self):
self._reset()
register_after_fork(self, ForkAwareThreadLock._reset)
def _reset(self):
self._lock = threading.Lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
class ForkAwareLocal(threading.local):
def __init__(self):
register_after_fork(self, lambda obj : obj.__dict__.clear())
def __reduce__(self):
return type(self), ()
| bsd-3-clause | -5,292,359,313,021,024,000 | 28.409496 | 79 | 0.598325 | false |
kilda/MaxiNet | MaxiNet/FrontendServer/server.py | 1 | 14695 | #!/usr/bin/python2
import atexit
import logging
import threading
import time
import Pyro4
from MaxiNet.tools import MaxiNetConfig
Pyro4.config.SOCK_REUSE = True
class NameServer(object):
def __init__(self, config=MaxiNetConfig()):
self.config = config
self.logger = logging.getLogger(__name__)
def start(self):
"""Start namserver instance
"""
Pyro4.config.SERVERTYPE = "thread"
Pyro4.config.THREADPOOL_SIZE = self.config.get_frontend_threads()
self._ns_thread = threading.Thread(target=Pyro4.naming.startNSloop,
kwargs={
"host": self.config.get_nameserver_ip(),
"port": self.config.get_nameserver_port(),
"hmac": self.config.get_nameserver_password()
})
self._ns_thread.daemon = True
self._ns_thread.start()
time.sleep(1)
atexit.register(self.stop)
self.config.register()
def stop(self):
"""Shut down nameserver instance.
"""
self.config.unregister()
class MaxiNetManager(object):
"""Manager class which manages distribution of workers to clusters.
    The MaxiNetManager class manages the distribution of workers to clusters.
    After connecting to the nameserver every Worker registers itself with the
    MaxiNetManager instance. Workers can then be reserved by Clusters to
    run Experiments on them. The Cluster has to free the Worker if it doesn't
use it anymore. Note that MaxiNet does not implement any "security" features,
meaning that there is no mechanism in place to prevent a malicious cluster
from messing with Workers that are not reserved for it.
Attributes:
config: instance of class MaxiNetConfig which is registerd on the
nameserver and accessible by clusters, experiments and workers.
logger: logging instance
"""
def __init__(self, config=MaxiNetConfig()):
self.config = config
self._worker_dict = {}
self._worker_dict_lock = threading.Lock()
self._ns = None
self._pyrodaemon = None
self.logger = logging.getLogger(__name__)
self.idents = []
self._monitor_thread = threading.Thread(target=self.monitor_clusters)
self._monitor_thread.daemon = True
self._monitor_thread.start()
@Pyro4.expose
def register_ident(self, ident):
"""Register identifier on manager.
To identify a cluster instance when communicating with the MaxiNetManager
an identifier string is used. The Cluster instance needs to generate
this string and register it with the Manager.
Args:
ident: Identifier string the Cluster instance wants to register
Returns:
True if successful, False if identifier is already registered.
"""
# maybe we should use a lock here
if not ident in self.idents:
self.idents.append(ident)
return True
else:
return False
@Pyro4.expose
def unregister_ident(self, ident):
"""Unregister identifier.
Frees up the identifier string of a cluster instance to use by other
instances. The unregistering instance must not use this string anymore
when communicating with the Manager if it did not reregister it
beforehand.
Args:
ident: Identifier string to unregister
Returns:
True
"""
if ident in self.idents:
self.idents.remove(ident)
return True
@Pyro4.expose
def valid_ident(self, ident):
"""Check if identifier is registerd with manager instance.
Args:
ident: Identifier to check
Returns:
True if identifier is registered, False if not.
"""
if ident in self.idents:
return True
else:
return False
@Pyro4.expose
def monitor_clusters(self):
"""check if the clusters (which allocated workers) are alive
otherwise, deallocate the workers from the cluster
"""
print "Monitoring clusters..."
while(True):
            time.sleep(5)  # we check every 5 seconds
clusters = list()
for worker in self._worker_dict.keys():
if (self._worker_dict[worker]["assigned"] != None):
if (not (self._worker_dict[worker]["assigned"] in clusters)):
clusters.append(self._worker_dict[worker]["assigned"])
#iterate over clusters and check if still alive:
for cluster in clusters:
try:
uri = self._ns.lookup(cluster)
cluster_instance = Pyro4.Proxy(uri)
alive = False
if(cluster_instance):
cluster_instance._pyroHmacKey=self.config.get_nameserver_password()
if(cluster_instance.get_status_is_alive()):
alive = True
except Exception as e:
pass
if(not alive):
#we just detected that this cluster is no more alive!
self.logger.warn("Detected a hung cluster. Freeing workers.")
for worker in self._worker_dict.keys():
if(self._worker_dict[worker]["assigned"] == cluster):
pn = self._worker_dict[worker]["pyroname"]+".mnManager"
p = Pyro4.Proxy(self._ns.lookup(pn))
p._pyroHmacKey=self.config.get_nameserver_password()
p.destroy_mininet()
self.free_worker(worker, cluster, True)
self.unregister_ident(cluster)
@Pyro4.expose
def getStatus(self):
""" used to check if the frontend server is still alive.
"""
return True
def start(self):
self.logger.debug("starting up and connecting to %s:%d"
% (self.config.get_nameserver_ip(), self.config.get_nameserver_port()))
#Pyro4.config.HMAC_KEY = self.config.get_nameserver_password()
self._ns = Pyro4.locateNS(self.config.get_nameserver_ip(), self.config.get_nameserver_port(), hmac_key=self.config.get_nameserver_password())
# replace local config with the one from nameserver
pw = self.config.get_nameserver_password()
self.config = Pyro4.Proxy(self._ns.lookup("config"))
self.config._pyroHmacKey=pw
self._pyrodaemon = Pyro4.Daemon(host=self.config.get_nameserver_ip())
self._pyrodaemon._pyroHmacKey=self.config.get_nameserver_password()
uri = self._pyrodaemon.register(self)
self._ns.register("MaxiNetManager", uri)
atexit.register(self._stop)
self.logger.info("startup successful. Waiting for workers to register...")
self._pyrodaemon.requestLoop()
def _stop(self):
self.logger.info("shutting down...")
#
# comment back in if the workerservers should shutdown once the frontend is terminated.
#
#self._worker_dict_lock.acquire()
#workers = self._worker_dict.keys()
#for worker in workers:
# pn = self._worker_dict[worker]["pyroname"]
# self._worker_dict_lock.release()
# p = Pyro4.Proxy(self._ns.lookup(pn))
# p._pyroHmacKey=self.config.get_nameserver_password()
# p.remoteShutdown()
# self._worker_dict_lock.acquire()
#self._worker_dict_lock.release()
#while(len(self.get_workers()) > 0):
# self.logger.debug("waiting for workers to unregister...")
# time.sleep(0.5)
self._ns.remove("MaxiNetManager")
self._pyrodaemon.unregister(self)
self._pyrodaemon.shutdown()
@Pyro4.expose
def stop(self):
"""Stop FrontendServer.
Tries to stop FrontendServer. Fails if there are workers assigned
to a cluster.
returns: True if FrontendServer was successfully stopped, False if not
"""
self._worker_dict_lock.acquire()
if (len(filter(lambda x: not (x["assigned"] is None),
self._worker_dict.values())) > 0):
self.logger.warn("shutdown not possible as there are still \
reserved workers")
self._worker_dict_lock.release()
return False
else:
self._worker_dict_lock.release()
self._stop()
return True
@Pyro4.expose
def worker_signin(self, worker_pyroname, worker_hostname):
"""Register Worker with FrontendServer.
Fails if Worker is already registered.
Args:
worker_pyroname: Pyro Identifier of Worker (String)
worker_hostname: Hostname of Worker
Returns:
True if successful, False if not.
"""
self._worker_dict_lock.acquire()
if(worker_hostname in self._worker_dict):
self._worker_dict_lock.release()
self.logger.warn("failed to register worker %s (pyro: %s) as it is\
already registered."
% (worker_hostname, worker_pyroname))
return False
self._worker_dict[worker_hostname] = {"assigned": None,
"pyroname": worker_pyroname}
self._worker_dict_lock.release()
self.logger.info("new worker signed in: %s (pyro: %s)"
% (worker_hostname, worker_pyroname))
return True
def _is_assigned(self, worker_hostname):
return not (self._worker_dict[worker_hostname]["assigned"] is None)
@Pyro4.expose
def print_worker_status(self):
numWorkers = len(self._worker_dict)
out = ""
out += "MaxiNet Frontend server running at %s\n" % self.config.get_nameserver_ip()
out += "Number of connected workers: %d\n" % numWorkers
if numWorkers > 0:
out += "--------------------------------\n"
for worker_name in self._worker_dict.keys():
status = "free"
if (self._worker_dict[worker_name]["assigned"]):
status = "assigned to %s" % self._worker_dict[worker_name]["assigned"]
out += "%s\t\t%s\n" % (worker_name, status)
return out
@Pyro4.expose
def get_worker_status(self, worker_hostname):
signed_in = False
assigned = None
self._worker_dict_lock.acquire()
if(worker_hostname in self._worker_dict):
signed_in = True
assigned = self._worker_dict[worker_hostname]["assigned"]
self._worker_dict_lock.release()
return (signed_in, assigned)
@Pyro4.expose
def worker_signout(self, worker_hostname):
"""Unregister Worker from FrontendServer.
Fails if worker is still assigned to a cluster.
Returns:
True if successful, False if not.
"""
self._worker_dict_lock.acquire()
if(worker_hostname in self._worker_dict):
if(not self._is_assigned(worker_hostname)):
del self._worker_dict[worker_hostname]
self._worker_dict_lock.release()
self.logger.info("worker signed out: %s" % (worker_hostname))
return True
else:
self._worker_dict_lock.release()
self.logger.warn("failed to sign out worker %s as it is still \
reserved" % (worker_hostname))
return False
self._worker_dict_lock.release()
return True
@Pyro4.expose
def reserve_worker(self, worker_hostname, id):
"""Assign Worker to cluster.
Fails if worker is already assigned to another cluster.
Args:
worker_hostname: Hostname of worker
id: identifier to identify cluster
Returns:
True if successful, False if not.
"""
self._worker_dict_lock.acquire()
if(self._is_assigned(worker_hostname)):
self._worker_dict_lock.release()
return None
else:
if self.valid_ident(id):
self._worker_dict[worker_hostname]["assigned"] = id
pyname = self._worker_dict[worker_hostname]["pyroname"]
self._worker_dict_lock.release()
self.logger.info("reserved worker %s for id %s"
% (worker_hostname, id))
return pyname
else:
self.logger.warn("unknown identfier %s encounterd. Something is \
not right here.")
return None
@Pyro4.expose
def free_worker(self, worker_hostname, id, force=False):
"""Deassign worker from cluster.
Fails if id is not equal to id provided at assignment call. Can be overriden
by force flag.
Args:
worker_hostname: Hostname of Worker
id: identifier of cluster
force: override flag for identifier verification
"""
self._worker_dict_lock.acquire()
if((self._worker_dict[worker_hostname]["assigned"] == id) or force):
self._worker_dict[worker_hostname]["assigned"] = None
self._worker_dict_lock.release()
self.logger.info("worker %s was freed" % worker_hostname)
return True
else:
self._worker_dict_lock.release()
self.logger.warn("failed to free worker %s as it was either not\
reserved or not reserved by freeing id %s"
% (worker_hostname, id))
return False
@Pyro4.expose
def get_free_workers(self):
"""Get list of unassigned workers"""
rd = {}
self._worker_dict_lock.acquire()
w = filter(lambda x: self._worker_dict[x]["assigned"] is None,
self._worker_dict)
for x in w:
rd[x] = self._worker_dict[x]
self._worker_dict_lock.release()
return rd
@Pyro4.expose
def get_workers(self):
"""Get list of all workers"""
self._worker_dict_lock.acquire()
w = self._worker_dict.copy()
self._worker_dict_lock.release()
return w
def main():
NameServer().start()
MaxiNetManager().start()
if(__name__ == "__main__"):
main()
| mit | -6,675,386,687,420,556,000 | 36.108586 | 149 | 0.569377 | false |
mganeva/mantid | Testing/Tools/cxxtest/python/setup.py | 1 | 1938 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
"""
Script to generate the installer for cxxtest.
"""
classifiers = """\
Development Status :: 4 - Beta
Intended Audience :: End Users/Desktop
License :: OSI Approved :: LGPL License
Natural Language :: English
Operating System :: Microsoft :: Windows
Operating System :: Unix
Programming Language :: Python
Topic :: Software Development :: Libraries :: Python Modules
"""
import cxxtest
import glob
import os
def _find_packages(path):
"""
Generate a list of nested packages
"""
pkg_list=[]
if not os.path.exists(path):
return []
if not os.path.exists(path+os.sep+"__init__.py"):
return []
else:
pkg_list.append(path)
for root, dirs, files in os.walk(path, topdown=True):
if root in pkg_list and "__init__.py" in files:
for name in dirs:
if os.path.exists(root+os.sep+name+os.sep+"__init__.py"):
pkg_list.append(root+os.sep+name)
return map(lambda x:x.replace(os.sep,"."), pkg_list)
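# For example (illustrative), _find_packages('cxxtest') yields entries such as
# ['cxxtest', 'cxxtest.subpackage', ...] -- one dotted name per nested directory
# that contains an __init__.py.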
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
packages = _find_packages('cxxtest')
scripts = glob.glob("scripts/*")
doclines = cxxtest.__doc__.split("\n")
setup(name="cxxtest",
version=cxxtest.__version__,
maintainer=cxxtest.__maintainer__,
maintainer_email=cxxtest.__maintainer_email__,
url = cxxtest.__url__,
license = cxxtest.__license__,
platforms = ["any"],
description = doclines[0],
classifiers = filter(None, classifiers.split("\n")),
long_description = "\n".join(doclines[2:]),
packages=packages,
keywords=['utility'],
scripts=scripts
)
| gpl-3.0 | 754,705,326,739,626,200 | 27.925373 | 68 | 0.648091 | false |
nakagami/reportlab | demos/colors/colortest.py | 1 | 2772 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
__version__='''$Id: colortest.py 3959 2012-09-27 14:39:39Z robin $'''
import reportlab.pdfgen.canvas
from reportlab.lib import colors
from reportlab.lib.units import inch
def run():
c = reportlab.pdfgen.canvas.Canvas('colortest.pdf')
#do a test of CMYK interspersed with RGB
#first do RGB values
framePage(c, 'Color Demo - RGB Space and CMYK spaces interspersed' )
y = 700
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'cyan')
c.setFillColorCMYK(1,0,0,0)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'red')
c.setFillColorRGB(1,0,0)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'magenta')
c.setFillColorCMYK(0,1,0,0)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'green')
c.setFillColorRGB(0,1,0)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'yellow')
c.setFillColorCMYK(0,0,1,0)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'blue')
c.setFillColorRGB(0,0,1)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.setFillColorRGB(0,0,0)
c.drawString(100, y, 'black')
c.setFillColorCMYK(0,0,0,1)
c.rect(200, y, 300, 30, fill=1)
y = y - 40
c.showPage()
#do all named colors
framePage(c, 'Color Demo - RGB Space - page %d' % c.getPageNumber())
all_colors = reportlab.lib.colors.getAllNamedColors().items()
all_colors.sort() # alpha order by name
c.setFont('Times-Roman', 12)
c.drawString(72,730, 'This shows all the named colors in the HTML standard.')
y = 700
for (name, color) in all_colors:
c.setFillColor(colors.black)
c.drawString(100, y, name)
c.setFillColor(color)
c.rect(200, y-10, 300, 30, fill=1)
y = y - 40
if y < 100:
c.showPage()
framePage(c, 'Color Demo - RGB Space - page %d' % c.getPageNumber())
y = 700
c.save()
def framePage(canvas, title):
canvas.setFont('Times-BoldItalic',20)
canvas.drawString(inch, 10.5 * inch, title)
canvas.setFont('Times-Roman',10)
canvas.drawCentredString(4.135 * inch, 0.75 * inch,
'Page %d' % canvas.getPageNumber())
#draw a border
canvas.setStrokeColorRGB(1,0,0)
canvas.setLineWidth(5)
canvas.line(0.8 * inch, inch, 0.8 * inch, 10.75 * inch)
#reset carefully afterwards
canvas.setLineWidth(1)
canvas.setStrokeColorRGB(0,0,0)
if __name__ == '__main__':
run()
| bsd-3-clause | -1,089,536,343,542,370,600 | 25.4 | 81 | 0.603896 | false |
frostynova/calico-docker | calico_containers/tests/st/test_no_powerstrip.py | 1 | 2590 | # Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sh import ErrorReturnCode
from functools import partial
from test_base import TestBase
from docker_host import DockerHost
class TestNoPowerstrip(TestBase):
def test_no_powerstrip(self):
"""
Test mainline functionality without using powerstrip.
"""
host = DockerHost('host')
host.calicoctl("profile add TEST_GROUP")
# Remove the environment variable such that docker run does not utilize
# powerstrip.
node1 = host.create_workload("node1", use_powerstrip=False)
node2 = host.create_workload("node2", use_powerstrip=False)
# Attempt to configure the nodes with the same profiles. This will fail
# since we didn't use powerstrip to create the nodes.
with self.assertRaises(ErrorReturnCode):
host.calicoctl("profile TEST_GROUP member add %s" % node1)
with self.assertRaises(ErrorReturnCode):
host.calicoctl("profile TEST_GROUP member add %s" % node2)
# Add the nodes to Calico networking.
ip1, ip2 = "192.168.1.1", "192.168.1.2"
host.calicoctl("container add %s %s" % (node1, ip1))
host.calicoctl("container add %s %s" % (node2, ip2))
# Now add the profiles.
host.calicoctl("profile TEST_GROUP member add %s" % node1)
host.calicoctl("profile TEST_GROUP member add %s" % node2)
# Inspect the nodes (ensure this works without powerstrip)
host.execute("docker inspect %s" % node1)
host.execute("docker inspect %s" % node2)
# Check it works
node1.assert_can_ping(ip1, retries=3)
node1.assert_can_ping(ip2)
node2.assert_can_ping(ip1)
node2.assert_can_ping(ip2)
# Test the teardown commands
host.calicoctl("profile remove TEST_GROUP")
host.calicoctl("container remove %s" % node1)
host.calicoctl("container remove %s" % node2)
host.calicoctl("pool remove 192.168.0.0/16")
host.calicoctl("node stop")
| apache-2.0 | 5,703,003,683,250,878,000 | 38.242424 | 80 | 0.671429 | false |
lyapandra/python-course-SG | hw_001_01.py | 1 | 4683 | # Lists
# Fill in the code of the functions below. The main() function is already
# set up to call the functions with several different
# parameters, and prints 'OK' when a function call
# returns the correct result.
# The starting code of each function contains only 'return'
# and is just a placeholder for your code.
# A. First and last characters match
# The function takes a list of strings as its argument.
# Return the number of strings
# that are at least 2 characters long
# and whose first and last characters are the same.
# Note: Python has no ++ operator, but += works.
def match_ends(words):
    # +++ your code +++
return
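# One possible reference solution (illustrative only; the `_ref` name is ours,
# the exercise stub above is still meant to be filled in by the student):
def match_ends_ref(words):
    count = 0
    for word in words:
        if len(word) >= 2 and word[0] == word[-1]:
            count += 1
    return count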
# B. Strings starting with X first
# The function takes a list of strings as its argument.
# Return a sorted list of the strings in which
# the group of strings starting with 'x' comes first, followed by all the others.
# For example: ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by concatenating two separately sorted lists.
def front_x(words):
    # +++ your code +++
return
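# One possible reference solution (illustrative; `front_x_ref` is our own name):
def front_x_ref(words):
    x_words = sorted(w for w in words if w.startswith('x'))
    other_words = sorted(w for w in words if not w.startswith('x'))
    return x_words + other_words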
# C. Sort by the last number
# Given a list of non-empty lists,
# return a list sorted in increasing order
# by the last element of each sublist.
# For example: [[1, 7], [1, 3], [3, 4, 5], [2, 2]] yields
# [[2, 2], [1, 3], [3, 4, 5], [1, 7]]
# Hint: use the key= parameter of the sort function
# to extract the last element of each sublist.
def sort_last(lists):
    # +++ your code +++
return
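# One possible reference solution (illustrative; `sort_last_ref` is our own name):
def sort_last_ref(lists):
    return sorted(lists, key=lambda sub: sub[-1])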
# D. Remove adjacent duplicates
# Given a list of numbers,
# return a list in which all runs of adjacent equal elements
# have been reduced to a single element.
# Thus [1, 2, 2, 3, 4, 4] yields [1, 2, 3, 4].
def remove_adjacent(nums):
    # +++ your code +++
return
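# One possible reference solution (illustrative; `remove_adjacent_ref` is our own name):
def remove_adjacent_ref(nums):
    result = []
    for num in nums:
        if not result or result[-1] != num:
            result.append(num)
    return result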
# The simple test() function is used in main() to print
# a comparison of what a function returns with what it should return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
    print('%s Got: %s | Expected: %s' % (prefix, repr(got), repr(expected)))
# Calls the functions above with test parameters.
def main():
    print('First and last characters match')
test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
print()
    print('Strings starting with X first')
test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
print()
    print('Sort by the last number')
test(sort_last([[1, 3], [3, 2], [2, 1]]),
[[2, 1], [3, 2], [1, 3]])
test(sort_last([[2, 3], [1, 2], [3, 1]]),
[[3, 1], [1, 2], [2, 3]])
test(sort_last([[1, 7], [1, 6], [3, 4, 5], [2, 2]]),
[[2, 2], [3, 4, 5], [1, 6], [1, 7]])
print()
    print('Remove adjacent duplicates')
test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
test(remove_adjacent([2, 2, 3, 3, 3, 3]), [2, 3])
test(remove_adjacent([4, 5, 5, 4, 4]), [4, 5, 4])
test(remove_adjacent([]), [])
if __name__ == '__main__':
main()
| gpl-3.0 | 890,874,095,567,146,200 | 31.68932 | 85 | 0.609148 | false |
MUNDO-platform/srccode | ckan-extensions/ckanext-wsstore/ckanext/wsstore/plugin.py | 1 | 1411 | # MUNDO Platform – Dane po Warszawsku (Data, Warsaw Style) http://danepowarszawsku.pl/
# @authors Jaroslaw Legierski, Tomasz Janisiewicz, Henryk Rosa, Research and Development Centre / Orange Labs
# copyright (c) 2014-2015 Orange Polska S.A. This code is open and distributed
# under the license: Lesser General Public License v2.1 (LGPLv2.1), the full text of which
# can be found at: https://www.gnu.org/licenses/lgpl-2.1.html
#
# Software created as part of the Project: MUNDO Miejskie Usługi Na Danych Oparte (City Services Based on Data)
# Beneficiaries: Fundacja Techsoup, Orange Polska S.A., Politechnika Warszawska,
# Fundacja Pracownia Badań i Innowacji Społecznych „Stocznia”, Fundacja Projekt Polska
# Project value: 1 108 978
# Co-funding value: 847 000
# Implementation period: 01.04.2014 – 31.12.2015
# Project co-financed by the Narodowe Centrum Badań i Rozwoju (National Centre
# for Research and Development) under the Social Innovation Programme
import ckan.plugins as plugins
import logic.action as action
import logging
log = logging.getLogger(__name__)
class IWsStorePlugin(plugins.SingletonPlugin):
plugins.implements(plugins.IActions)
def get_actions(self):
actions = {
'wsstore_create':action.wsstore_create,
'wsstore_delete':action.wsstore_delete,
'wsstore_show':action.wsstore_show,
'wsstore_get': action.wsstore_get
}
return actions
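        # With this plugin enabled, the actions above become reachable through
        # CKAN's action API (illustrative URL, standard CKAN routing assumed):
        #   POST <ckan-site>/api/3/action/wsstore_show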
| lgpl-2.1 | 3,421,587,500,985,983,000 | 42.28125 | 105 | 0.731408 | false |
ic-hep/DIRAC | Resources/Storage/test/FIXME_Test_StorageElement.py | 1 | 38208 | #! /usr/bin/env python
# FIXME: if it requires a dirac.cfg it is not a unit test and should be moved to tests directory
import unittest
import time
import os
import shutil
import sys
import types
from DIRAC.Core.Base.Script import parseCommandLine, getPositionalArgs
parseCommandLine()
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
from DIRAC.Core.Utilities.File import getSize
from DIRAC import gLogger
positionalArgs = getPositionalArgs()
__RCSID__ = "$Id$"
if len( positionalArgs ) < 3:
  print 'Usage: Test_StorageElement.py <StorageElement> <lfnDir> <localFile>'
sys.exit()
else:
storageElementToTest = positionalArgs[0]
lfnDirToTest = positionalArgs[1]
fileToTest = positionalArgs[2]
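# Example invocation (all three arguments are illustrative placeholders):
#   python Test_StorageElement.py CERN-USER /lhcb/user/s/someuser/test_dir /tmp/testFile.txt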
class StorageElementTestCase( unittest.TestCase ):
""" Base class for the StorageElement test cases
"""
def setUp( self ):
self.numberOfFiles = 1
self.storageElement = StorageElement( storageElementToTest )
self.localSourceFile = fileToTest
self.localFileSize = getSize( self.localSourceFile )
self.destDirectory = lfnDirToTest
# destinationDir = returnSingleResult( self.storageElement.getURL( self.destDirectory ) )['Value']
destinationDir = self.destDirectory
res = self.storageElement.createDirectory( destinationDir )
self.assert_( res['OK'] )
def tearDown( self ):
# destinationDir = returnSingleResult( self.storageElement.getURL( self.destDirectory ) )['Value']
res = self.storageElement.removeDirectory( self.destDirectory, recursive = True )
self.assert_( res['OK'] )
class GetInfoTestCase( StorageElementTestCase ):
def test_dump( self ):
print '\n\n#########################################################################\n\n\t\t\tDump test\n'
self.storageElement.dump()
def test_isValid( self ):
print '\n\n#########################################################################\n\n\t\t\tIs valid test\n'
res = self.storageElement.isValid()
self.assert_( res['OK'] )
def test_getRemotePlugins( self ):
print '\n\n#########################################################################\n\n\t\t\tGet remote protocols test\n'
res = self.storageElement.getRemotePlugins()
self.assert_( res['OK'] )
self.assertEqual( type( res['Value'] ), types.ListType )
def test_getLocalPlugins( self ):
print '\n\n#########################################################################\n\n\t\t\tGet local protocols test\n'
res = self.storageElement.getLocalPlugins()
self.assert_( res['OK'] )
self.assertEqual( type( res['Value'] ), types.ListType )
def test_getPlugins( self ):
print '\n\n#########################################################################\n\n\t\t\tGet protocols test\n'
res = self.storageElement.getPlugins()
self.assert_( res['OK'] )
self.assertEqual( type( res['Value'] ), types.ListType )
#def test_isLocalSE( self ):
# print '\n\n#########################################################################\n\n\t\t\tIs local SE test\n'
# res = self.storageElement.isLocalSE()
# self.assert_( res['OK'] )
# self.assertFalse( res['Value'] )
#def test_getStorageElementOption( self ):
# print '\n\n#########################################################################\n\n\t\t\tGet storage element option test\n'
# res = self.storageElement.getStorageElementOption( 'BackendType' )
# self.assert_( res['OK'] )
# self.assertEqual( res['Value'], 'DISET' )
def test_getStorageParameters( self ):
print '\n\n#########################################################################\n\n\t\t\tGet storage parameters test\n'
result = self.storageElement.getStorageParameters( 'DIP' )
self.assert_( result['OK'] )
resDict = result['Value']
self.assertEqual( resDict['Protocol'], 'dips' )
#self.assertEqual( resDict['SpaceToken'], 'LHCb_RAW' )
#self.assertEqual( resDict['WSUrl'], '/srm/managerv2?SFN=' )
#self.assertEqual( resDict['Host'], 'srm-lhcb.cern.ch' )
#self.assertEqual( resDict['Path'], '/castor/cern.ch/grid' )
#self.assertEqual( resDict['ProtocolName'], 'SRM2' )
#self.assertEqual( resDict['Port'], '8443' )
class FileTestCases( StorageElementTestCase ):
def test_exists( self ):
print '\n\n#########################################################################\n\n\t\t\tExists test\n'
destinationFilePath = '%s/testFile.%s' % ( self.destDirectory, time.time() )
# pfnForLfnRes = self.storageElement.getURL( destinationFilePath )
#destinationPfn = pfnForLfnRes['Value']['Successful'].values()[0]
fileDict = {destinationFilePath:self.localSourceFile}
putFileRes = returnSingleResult( self.storageElement.putFile( fileDict ) )
# File exists
existsRes = returnSingleResult( self.storageElement.exists( destinationFilePath ) )
# Now remove the destination file
removeFileRes = returnSingleResult( self.storageElement.removeFile( destinationFilePath ) )
# Check removed file
missingExistsRes = returnSingleResult( self.storageElement.exists( destinationFilePath ) )
# Check directories are handled properly
destinationDir = os.path.dirname( destinationFilePath )
directoryExistsRes = returnSingleResult( self.storageElement.exists( destinationDir ) )
# Check that the put was done correctly
self.assert_( putFileRes['OK'] )
self.assert_( putFileRes['Value'] )
self.assertEqual( putFileRes['Value'], self.localFileSize )
# Check that we checked the file correctly
self.assert_( existsRes['OK'] )
self.assert_( existsRes['Value'] )
# Check that the removal was done correctly
self.assert_( removeFileRes['OK'] )
self.assert_( removeFileRes['Value'] )
# Check the exists for non existant file
self.assert_( missingExistsRes['OK'] )
self.assertFalse( missingExistsRes['Value'] )
# Check that directories exist
self.assert_( directoryExistsRes['OK'] )
self.assert_( directoryExistsRes['Value'] )
def test_isFile( self ):
print '\n\n#########################################################################\n\n\t\t\tIs file size test\n'
destinationFilePath = '%s/testFile.%s' % ( self.destDirectory, time.time() )
# pfnForLfnRes = returnSingleResult( self.storageElement.getURL( destinationFilePath ) )
#destinationPfn = pfnForLfnRes['Value']
fileDict = {destinationFilePath:self.localSourceFile}
putFileRes = returnSingleResult( self.storageElement.putFile( fileDict ) )
# Is a file
isFileRes = returnSingleResult( self.storageElement.isFile( destinationFilePath ) )
# Now remove the destination file
removeFileRes = returnSingleResult( self.storageElement.removeFile( destinationFilePath ) )
# Get metadata for a removed file
missingIsFileRes = returnSingleResult( self.storageElement.isFile( destinationFilePath ) )
# Check directories are handled properly
destinationDir = os.path.dirname( destinationFilePath )
directoryIsFileRes = returnSingleResult( self.storageElement.isFile( destinationDir ) )
# Check that the put was done correctly
self.assert_( putFileRes['OK'] )
self.assert_( putFileRes['Value'] )
self.assertEqual( putFileRes['Value'], self.localFileSize )
# Check that we checked the file correctly
self.assert_( isFileRes['OK'] )
self.assert_( isFileRes['Value'] )
# Check that the removal was done correctly
self.assert_( removeFileRes['OK'] )
self.assert_( removeFileRes['Value'] )
# Check the is file for non existant file
self.assertFalse( missingIsFileRes['OK'] )
expectedError = "File does not exist"
self.assert_( expectedError in missingIsFileRes['Message'] )
# Check that is file operation with a directory
self.assert_( directoryIsFileRes['OK'] )
self.assertFalse( directoryIsFileRes['Value'] )
def test_putFile( self ):
print '\n\n#########################################################################\n\n\t\t\tPut file test\n'
destinationFilePath = '%s/testFile.%s' % ( self.destDirectory, time.time() )
# pfnForLfnRes = returnSingleResult( self.storageElement.getURL( destinationFilePath ) )
#destinationPfn = pfnForLfnRes['Value']
fileDict = {destinationFilePath:self.localSourceFile}
putFileRes = returnSingleResult( self.storageElement.putFile( fileDict ) )
# Now remove the destination file
removeFileRes = returnSingleResult( self.storageElement.removeFile( destinationFilePath ) )
# Check that the put was done correctly
self.assert_( putFileRes['OK'] )
self.assert_( putFileRes['Value'] )
self.assertEqual( putFileRes['Value'], self.localFileSize )
# Check that the removal was done correctly
self.assert_( removeFileRes['OK'] )
self.assert_( removeFileRes['Value'] )
def test_getFile( self ):
print '\n\n#########################################################################\n\n\t\t\tGet file test\n'
destinationFilePath = '%s/testFile.%s' % ( self.destDirectory, time.time() )
# pfnForLfnRes = returnSingleResult( self.storageElement.getURL( destinationFilePath ) )
#destinationPfn = pfnForLfnRes['Value']
fileDict = {destinationFilePath:self.localSourceFile}
putFileRes = returnSingleResult( self.storageElement.putFile( fileDict ) )
# Now get a local copy of the file
getFileRes = returnSingleResult( self.storageElement.getFile( destinationFilePath ) )
# Now remove the destination file
removeFileRes = returnSingleResult( self.storageElement.removeFile( destinationFilePath ) )
# Clean up the local mess
os.remove( os.path.basename( destinationFilePath ) )
# Check that the put was done correctly
self.assert_( putFileRes['OK'] )
self.assert_( putFileRes['Value'] )
self.assertEqual( putFileRes['Value'], self.localFileSize )
# Check that we got the file correctly
self.assert_( getFileRes['OK'] )
self.assertEqual( getFileRes['Value'], self.localFileSize )
# Check that the removal was done correctly
self.assert_( removeFileRes['OK'] )
self.assert_( removeFileRes['Value'] )
def test_getFileMetadata( self ):
print '\n\n#########################################################################\n\n\t\t\tGet file metadata test\n'
destinationFilePath = '%s/testFile.%s' % ( self.destDirectory, time.time() )
# pfnForLfnRes = returnSingleResult( self.storageElement.getURL( destinationFilePath ) )
#destinationPfn = pfnForLfnRes['Value']
fileDict = {destinationFilePath:self.localSourceFile}
putFileRes = returnSingleResult( self.storageElement.putFile( fileDict ) )
# Get the file metadata
getFileMetadataRes = returnSingleResult( self.storageElement.getFileMetadata( destinationFilePath ) )
# Now remove the destination file
removeFileRes = returnSingleResult( self.storageElement.removeFile( destinationFilePath ) )
# Get metadata for a removed file
getMissingFileMetadataRes = returnSingleResult( self.storageElement.getFileMetadata( destinationFilePath ) )
# Check directories are handled properly
destinationDir = os.path.dirname( destinationFilePath )
directoryMetadataRes = returnSingleResult( self.storageElement.getFileMetadata( destinationDir ) )
# Check that the put was done correctly
self.assert_( putFileRes['OK'] )
self.assert_( putFileRes['Value'] )
self.assertEqual( putFileRes['Value'], self.localFileSize )
# Check that the metadata was done correctly
self.assert_( getFileMetadataRes['OK'] )
metadataDict = getFileMetadataRes['Value']
# Works only for SRM2 plugin
# self.assert_( metadataDict['Cached'] )
# self.assertFalse( metadataDict['Migrated'] )
self.assertEqual( metadataDict['Size'], self.localFileSize )
# Check that the removal was done correctly
self.assert_( removeFileRes['OK'] )
self.assert_( removeFileRes['Value'] )
    # Check the get metadata for a non-existent file
self.assertFalse( getMissingFileMetadataRes['OK'] )
expectedError = "File does not exist"
self.assert_( expectedError in getMissingFileMetadataRes['Message'] )
    # Check the metadata operation with a directory
self.assertFalse( directoryMetadataRes['OK'] )
expectedError = "Supplied path is not a file"
self.assert_( expectedError in directoryMetadataRes['Message'] )
def test_getFileSize( self ):
print '\n\n#########################################################################\n\n\t\t\tGet file size test\n'
destinationFilePath = '%s/testFile.%s' % ( self.destDirectory, time.time() )
# pfnForLfnRes = returnSingleResult( self.storageElement.getURL( destinationFilePath ) )
#destinationPfn = pfnForLfnRes['Value']
fileDict = {destinationFilePath:self.localSourceFile}
putFileRes = returnSingleResult( self.storageElement.putFile( fileDict ) )
    # Get the file size
getFileSizeRes = returnSingleResult( self.storageElement.getFileSize( destinationFilePath ) )
# Now remove the destination file
removeFileRes = returnSingleResult( self.storageElement.removeFile( destinationFilePath ) )
    # Get the file size for a removed file
getMissingFileSizeRes = returnSingleResult( self.storageElement.getFileSize( destinationFilePath ) )
# Check directories are handled properly
destinationDir = os.path.dirname( destinationFilePath )
directorySizeRes = returnSingleResult( self.storageElement.getFileSize( destinationDir ) )
# Check that the put was done correctly
self.assert_( putFileRes['OK'] )
self.assert_( putFileRes['Value'] )
self.assertEqual( putFileRes['Value'], self.localFileSize )
    # Check that the file size was returned correctly
self.assert_( getFileSizeRes['OK'] )
self.assertEqual( getFileSizeRes['Value'], self.localFileSize )
# Check that the removal was done correctly
self.assert_( removeFileRes['OK'] )
self.assert_( removeFileRes['Value'] )
    # Check the get file size for a non-existent file
self.assertFalse( getMissingFileSizeRes['OK'] )
expectedError = "File does not exist"
self.assert_( expectedError in getMissingFileSizeRes['Message'] )
    # Check the file size operation with a directory
self.assertFalse( directorySizeRes['OK'] )
expectedError = "Supplied path is not a file"
self.assert_( expectedError in directorySizeRes['Message'] )
# Works only for SRM2 plugins
# def test_prestageFile( self ):
# print '\n\n#########################################################################\n\n\t\t\tPrestage file test\n'
# destinationFilePath = '%s/testFile.%s' % ( self.destDirectory, time.time() )
# pfnForLfnRes = self.storageElement.getURL( destinationFilePath )
# destinationPfn = pfnForLfnRes['Value']
# fileDict = {destinationPfn:self.localSourceFile}
# putFileRes = self.storageElement.putFile( fileDict, singleFile = True )
# # Get the file metadata
# prestageFileRes = self.storageElement.prestageFile( destinationPfn, singleFile = True )
# # Now remove the destination file
# removeFileRes = self.storageElement.removeFile( destinationPfn, singleFile = True )
# # Get metadata for a removed file
# missingPrestageFileRes = self.storageElement.prestageFile( destinationPfn, singleFile = True )
#
# # Check that the put was done correctly
# self.assert_( putFileRes['OK'] )
# self.assert_( putFileRes['Value'] )
# self.assertEqual( putFileRes['Value'], self.localFileSize )
# # Check that the prestage was done correctly
# self.assert_( prestageFileRes['OK'] )
# self.assertEqual( type( prestageFileRes['Value'] ), types.StringType )
# # Check that the removal was done correctly
# self.assert_( removeFileRes['OK'] )
# self.assert_( removeFileRes['Value'] )
  # # Check the prestage for a non-existent file
# self.assertFalse( missingPrestageFileRes['OK'] )
# expectedError = "No such file or directory"
# self.assert_( expectedError in missingPrestageFileRes['Message'] )
# Works only for SRM2 plugins
# def test_prestageStatus( self ):
# print '\n\n#########################################################################\n\n\t\tPrestage status test\n'
# destinationFilePath = '%s/testFile.%s' % ( self.destDirectory, time.time() )
# pfnForLfnRes = self.storageElement.getURL( destinationFilePath )
# destinationPfn = pfnForLfnRes['Value']
# fileDict = {destinationPfn:self.localSourceFile}
# putFileRes = self.storageElement.putFile( fileDict, singleFile = True )
# # Get the file metadata
# prestageFileRes = self.storageElement.prestageFile( destinationPfn, singleFile = True )
# srmID = ''
# if prestageFileRes['OK']:
# srmID = prestageFileRes['Value']
# # Take a quick break to allow the SRM to realise the file is available
# sleepTime = 10
# print 'Sleeping for %s seconds' % sleepTime
# time.sleep( sleepTime )
# # Check that we can monitor the stage request
# prestageStatusRes = self.storageElement.prestageFileStatus( {destinationPfn:srmID}, singleFile = True )
# # Now remove the destination file
# removeFileRes = self.storageElement.removeFile( destinationPfn, singleFile = True )
#
# # Check that the put was done correctly
# self.assert_( putFileRes['OK'] )
# self.assert_( putFileRes['Value'] )
# self.assertEqual( putFileRes['Value'], self.localFileSize )
# # Check that the prestage was done correctly
# self.assert_( prestageFileRes['OK'] )
# self.assertEqual( type( prestageFileRes['Value'] ), types.StringType )
# # Check the file was found to be staged
# self.assert_( prestageStatusRes['OK'] )
# self.assert_( prestageStatusRes['Value'] )
# # Check that the removal was done correctly
# self.assert_( removeFileRes['OK'] )
# self.assert_( removeFileRes['Value'] )
# Works only for SRM2 plugins
# def test_pinRelease( self ):
# print '\n\n#########################################################################\n\n\t\tPin release test\n'
# destinationFilePath = '%s/testFile.%s' % ( self.destDirectory, time.time() )
# pfnForLfnRes = self.storageElement.getURL( destinationFilePath )
# destinationPfn = pfnForLfnRes['Value']
# fileDict = {destinationPfn:self.localSourceFile}
# putFileRes = self.storageElement.putFile( fileDict, singleFile = True )
# # Get the file metadata
# pinFileRes = self.storageElement.pinFile( destinationPfn, singleFile = True )
# srmID = ''
# if pinFileRes['OK']:
# srmID = pinFileRes['Value']
# # Check that we can release the file
# releaseFileRes = self.storageElement.releaseFile( {destinationPfn:srmID}, singleFile = True )
# # Now remove the destination file
# removeFileRes = self.storageElement.removeFile( destinationPfn, singleFile = True )
#
# # Check that the put was done correctly
# self.assert_( putFileRes['OK'] )
# self.assert_( putFileRes['Value'] )
# self.assertEqual( putFileRes['Value'], self.localFileSize )
# # Check that the file pin was done correctly
# self.assert_( pinFileRes['OK'] )
# self.assertEqual( type( pinFileRes['Value'] ), types.StringType )
# # Check the file was found to be staged
# self.assert_( releaseFileRes['OK'] )
# self.assert_( releaseFileRes['Value'] )
# # Check that the removal was done correctly
# self.assert_( removeFileRes['OK'] )
# self.assert_( removeFileRes['Value'] )
def test_getURL( self ):
print '\n\n#########################################################################\n\n\t\tGet access url test\n'
destinationFilePath = '%s/testFile.%s' % ( self.destDirectory, time.time() )
# pfnForLfnRes = returnSingleResult( self.storageElement.getURL( destinationFilePath ) )
#destinationPfn = pfnForLfnRes['Value']
fileDict = {destinationFilePath:self.localSourceFile}
putFileRes = returnSingleResult( self.storageElement.putFile( fileDict ) )
# Get a transfer url for the file
getTurlRes = self.storageElement.getURL( destinationFilePath, protocol = 'dips' )
# Remove the destination file
removeFileRes = returnSingleResult( self.storageElement.removeFile( destinationFilePath ) )
# Get missing turl res
getMissingTurlRes = self.storageElement.getURL( destinationFilePath, protocol = 'dips' )
# Check that the put was done correctly
self.assert_( putFileRes['OK'] )
self.assert_( putFileRes['Value'] )
self.assertEqual( putFileRes['Value'], self.localFileSize )
# Check that we can get the tURL properly
self.assert_( getTurlRes['OK'] )
self.assert_( getTurlRes['Value'] )
self.assert_( type( getTurlRes['Value'] ) == types.DictType )
self.assert_( type( getTurlRes['Value']['Successful'][destinationFilePath] ) in types.StringTypes )
# Check that the removal was done correctly
self.assert_( removeFileRes['OK'] )
self.assert_( removeFileRes['Value'] )
# Works only for SRM2 plugins
    # # Check that non-existent files are handled correctly
# self.assertFalse( getMissingTurlRes['OK'] )
# expectedError = "File does not exist"
# self.assert_( expectedError in getMissingTurlRes['Message'] )
class DirectoryTestCases( StorageElementTestCase ):
def test_createDirectory( self ):
print '\n\n#########################################################################\n\n\t\t\tCreate directory test\n'
directory = "%s/%s" % ( self.destDirectory, 'createDirectoryTest' )
# pfnForLfnRes = returnSingleResult( self.storageElement.getURL( directory ) )
#directoryPfn = pfnForLfnRes['Value']
createDirRes = self.storageElement.createDirectory( directory )
# Remove the target dir
removeDirRes = self.storageElement.removeDirectory( directory, recursive = True )
# Check that the creation was done correctly
self.assert_( createDirRes['OK'] )
self.assert_( createDirRes['Value'] )
# Remove the directory
self.assert_( removeDirRes['OK'] )
self.assert_( removeDirRes['Value'] )
def test_isDirectory( self ):
print '\n\n#########################################################################\n\n\t\t\tIs directory test\n'
destDirectory = self.destDirectory
# Test that it is a directory
isDirectoryRes = self.storageElement.isDirectory( destDirectory )
    # Test that non-existent dirs are handled correctly
nonExistantDir = "%s/%s" % ( destDirectory, 'NonExistant' )
nonExistantDirRes = self.storageElement.isDirectory( nonExistantDir )
# Check that it works with the existing dir
self.assert_( isDirectoryRes['OK'] )
self.assert_( isDirectoryRes['Value'] )
    # Check that we handle non-existent dirs correctly
self.assert_( nonExistantDirRes['Value']['Failed'][nonExistantDir] in ['Path does not exist'] )
def test_listDirectory( self ):
print '\n\n#########################################################################\n\n\t\t\tList directory test\n'
destDirectory = "%s/%s" % ( self.destDirectory, 'listDirectoryTest' )
# destDirectory = returnSingleResult( self.storageElement.getURL( directory ) )['Value']
# Create a local directory to upload
localDir = '/tmp/unit-test'
srcFile = '/etc/group'
sizeOfLocalFile = getSize( srcFile )
if not os.path.exists( localDir ):
os.mkdir( localDir )
for i in range( self.numberOfFiles ):
shutil.copy( srcFile, '%s/testFile.%s' % ( localDir, time.time() ) )
time.sleep( 1 )
# Check that we can successfully upload the directory to the storage element
dirDict = {destDirectory:localDir}
putDirRes = self.storageElement.putDirectory( dirDict )
print putDirRes
# List the remote directory
listDirRes = self.storageElement.listDirectory( destDirectory )
    # Now remove the remote directory
removeDirRes = self.storageElement.removeDirectory( destDirectory, recursive = True )
print removeDirRes
#Clean up the locally created directory
shutil.rmtree( localDir )
# Perform the checks for the put dir operation
self.assert_( putDirRes['OK'] )
self.assert_( putDirRes['Value'] )
if putDirRes['Value']['Successful'][destDirectory]['Files']:
self.assertEqual( putDirRes['Value']['Successful'][destDirectory]['Files'], self.numberOfFiles )
self.assertEqual( putDirRes['Value']['Successful'][destDirectory]['Size'], self.numberOfFiles * sizeOfLocalFile )
self.assert_( type( putDirRes['Value']['Successful'][destDirectory]['Files'] ) in [types.IntType, types.LongType] )
self.assert_( type( putDirRes['Value']['Successful'][destDirectory]['Size'] ) in [types.IntType, types.LongType] )
# Perform the checks for the list dir operation
self.assert_( listDirRes['OK'] )
self.assert_( listDirRes['Value'] )
self.assert_( listDirRes['Value']['Successful'][destDirectory].has_key( 'SubDirs' ) )
self.assert_( listDirRes['Value']['Successful'][destDirectory].has_key( 'Files' ) )
self.assertEqual( len( listDirRes['Value']['Successful'][destDirectory]['Files'].keys() ), self.numberOfFiles )
# Perform the checks for the remove directory operation
self.assert_( removeDirRes['OK'] )
self.assert_( removeDirRes['Value'] )
if removeDirRes['Value']['Successful'][destDirectory]['FilesRemoved']:
self.assertEqual( removeDirRes['Value']['Successful'][destDirectory]['FilesRemoved'], self.numberOfFiles )
self.assertEqual( removeDirRes['Value']['Successful'][destDirectory]['SizeRemoved'], self.numberOfFiles * sizeOfLocalFile )
self.assert_( type( removeDirRes['Value']['Successful'][destDirectory]['FilesRemoved'] ) in [types.IntType, types.LongType] )
self.assert_( type( removeDirRes['Value']['Successful'][destDirectory]['SizeRemoved'] ) in [types.IntType, types.LongType] )
def test_getDirectoryMetadata( self ):
print '\n\n#########################################################################\n\n\t\t\tDirectory metadata test\n'
destDirectory = "%s/%s" % ( self.destDirectory, 'getDirectoryMetadataTest' )
# destDirectory = returnSingleResult( self.storageElement.getURL( directory ) )['Value']
# Create a local directory to upload
localDir = '/tmp/unit-test'
srcFile = '/etc/group'
sizeOfLocalFile = getSize( srcFile )
if not os.path.exists( localDir ):
os.mkdir( localDir )
for i in range( self.numberOfFiles ):
shutil.copy( srcFile, '%s/testFile.%s' % ( localDir, time.time() ) )
time.sleep( 1 )
# Check that we can successfully upload the directory to the storage element
dirDict = {destDirectory:localDir}
putDirRes = self.storageElement.putDirectory( dirDict )
# Get the directory metadata
metadataDirRes = self.storageElement.getDirectoryMetadata( destDirectory )
    # Now remove the remote directory
removeDirRes = self.storageElement.removeDirectory( destDirectory, recursive = True )
#Clean up the locally created directory
shutil.rmtree( localDir )
# Perform the checks for the put dir operation
self.assert_( putDirRes['OK'] )
self.assert_( putDirRes['Value'] )
if putDirRes['Value']['Successful'][destDirectory]['Files']:
self.assertEqual( putDirRes['Value']['Successful'][destDirectory]['Files'], self.numberOfFiles )
self.assertEqual( putDirRes['Value']['Successful'][destDirectory]['Size'], self.numberOfFiles * sizeOfLocalFile )
self.assert_( type( putDirRes['Value']['Successful'][destDirectory]['Files'] ) in [types.IntType, types.LongType] )
self.assert_( type( putDirRes['Value']['Successful'][destDirectory]['Size'] ) in [types.IntType, types.LongType] )
# Perform the checks for the list dir operation
self.assert_( metadataDirRes['OK'] )
self.assert_( metadataDirRes['Value'] )
# Works only for the SRM2 plugin
# self.assert_( metadataDirRes['Value']['Mode'] )
# self.assert_( type( metadataDirRes['Value']['Mode'] ) == types.IntType )
self.assert_( metadataDirRes['Value']['Successful'][destDirectory]['Exists'] )
self.assertEqual( metadataDirRes['Value']['Successful'][destDirectory]['Type'], 'Directory' )
# Perform the checks for the remove directory operation
self.assert_( removeDirRes['OK'] )
self.assert_( removeDirRes['Value'] )
if removeDirRes['Value']['Successful'][destDirectory]['FilesRemoved']:
self.assertEqual( removeDirRes['Value']['Successful'][destDirectory]['FilesRemoved'], self.numberOfFiles )
self.assertEqual( removeDirRes['Value']['Successful'][destDirectory]['SizeRemoved'], self.numberOfFiles * sizeOfLocalFile )
self.assert_( type( removeDirRes['Value']['Successful'][destDirectory]['FilesRemoved'] ) in [types.IntType, types.LongType] )
self.assert_( type( removeDirRes['Value']['Successful'][destDirectory]['SizeRemoved'] ) in [types.IntType, types.LongType] )
def test_getDirectorySize( self ):
print '\n\n#########################################################################\n\n\t\t\tGet directory size test\n'
destDirectory = "%s/%s" % ( self.destDirectory, 'getDirectorySizeTest' )
# destDirectory = returnSingleResult( self.storageElement.getURL( directory ) )['Value']
# Create a local directory to upload
localDir = '/tmp/unit-test'
srcFile = '/etc/group'
sizeOfLocalFile = getSize( srcFile )
if not os.path.exists( localDir ):
os.mkdir( localDir )
for i in range( self.numberOfFiles ):
shutil.copy( srcFile, '%s/testFile.%s' % ( localDir, time.time() ) )
time.sleep( 1 )
# Check that we can successfully upload the directory to the storage element
dirDict = {destDirectory:localDir}
putDirRes = self.storageElement.putDirectory( dirDict )
    # Get the directory size
getDirSizeRes = self.storageElement.getDirectorySize( destDirectory )
    # Now remove the remote directory
removeDirRes = self.storageElement.removeDirectory( destDirectory, recursive = True )
#Clean up the locally created directory
shutil.rmtree( localDir )
# Perform the checks for the put dir operation
self.assert_( putDirRes['OK'] )
self.assert_( putDirRes['Value'] )
if putDirRes['Value']['Successful'][destDirectory]['Files']:
self.assertEqual( putDirRes['Value']['Successful'][destDirectory]['Files'], self.numberOfFiles )
self.assertEqual( putDirRes['Value']['Successful'][destDirectory]['Size'], self.numberOfFiles * sizeOfLocalFile )
self.assert_( type( putDirRes['Value']['Successful'][destDirectory]['Files'] ) in [types.IntType, types.LongType] )
self.assert_( type( putDirRes['Value']['Successful'][destDirectory]['Size'] ) in [types.IntType, types.LongType] )
# Perform the checks for the get dir size operation
self.assert_( getDirSizeRes['OK'] )
self.assert_( getDirSizeRes['Value'] )
self.assertFalse( getDirSizeRes['Value']['Successful'][destDirectory]['SubDirs'] )
self.assert_( type( getDirSizeRes['Value']['Successful'][destDirectory]['Files'] ) in [types.IntType, types.LongType] )
self.assert_( type( getDirSizeRes['Value']['Successful'][destDirectory]['Size'] ) in [types.IntType, types.LongType] )
# Perform the checks for the remove directory operation
self.assert_( removeDirRes['OK'] )
self.assert_( removeDirRes['Value'] )
if removeDirRes['Value']['Successful'][destDirectory]['FilesRemoved']:
self.assertEqual( removeDirRes['Value']['Successful'][destDirectory]['FilesRemoved'], self.numberOfFiles )
self.assertEqual( removeDirRes['Value']['Successful'][destDirectory]['SizeRemoved'], self.numberOfFiles * sizeOfLocalFile )
self.assert_( type( removeDirRes['Value']['Successful'][destDirectory]['FilesRemoved'] ) in [types.IntType, types.LongType] )
self.assert_( type( removeDirRes['Value']['Successful'][destDirectory]['SizeRemoved'] ) in [types.IntType, types.LongType] )
def test_removeDirectory( self ):
print '\n\n#########################################################################\n\n\t\t\tRemove directory test\n'
destDirectory = "%s/%s" % ( self.destDirectory, 'removeDirectoryTest' )
# destDirectory = returnSingleResult( self.storageElement.getURL( directory ) )['Value']
# Create a local directory to upload
localDir = '/tmp/unit-test'
srcFile = '/etc/group'
sizeOfLocalFile = getSize( srcFile )
if not os.path.exists( localDir ):
os.mkdir( localDir )
for i in range( self.numberOfFiles ):
shutil.copy( srcFile, '%s/testFile.%s' % ( localDir, time.time() ) )
time.sleep( 1 )
# Check that we can successfully upload the directory to the storage element
dirDict = {destDirectory:localDir}
putDirRes = self.storageElement.putDirectory( dirDict )
    # Now remove the remote directory
removeDirRes = self.storageElement.removeDirectory( destDirectory, recursive = True )
#Clean up the locally created directory
shutil.rmtree( localDir )
# Perform the checks for the put dir operation
self.assert_( putDirRes['OK'] )
self.assert_( putDirRes['Value'] )
if putDirRes['Value']['Successful'][destDirectory]['Files']:
self.assertEqual( putDirRes['Value']['Successful'][destDirectory]['Files'], self.numberOfFiles )
self.assertEqual( putDirRes['Value']['Successful'][destDirectory]['Size'], self.numberOfFiles * sizeOfLocalFile )
self.assert_( type( putDirRes['Value']['Successful'][destDirectory]['Files'] ) in [types.IntType, types.LongType] )
self.assert_( type( putDirRes['Value']['Successful'][destDirectory]['Size'] ) in [types.IntType, types.LongType] )
# Perform the checks for the remove directory operation
self.assert_( removeDirRes['OK'] )
self.assert_( removeDirRes['Value'] )
if removeDirRes['Value']['Successful'][destDirectory]['FilesRemoved']:
self.assertEqual( removeDirRes['Value']['Successful'][destDirectory]['FilesRemoved'], self.numberOfFiles )
self.assertEqual( removeDirRes['Value']['Successful'][destDirectory]['SizeRemoved'], self.numberOfFiles * sizeOfLocalFile )
self.assert_( type( removeDirRes['Value']['Successful'][destDirectory]['FilesRemoved'] ) in [types.IntType, types.LongType] )
self.assert_( type( removeDirRes['Value']['Successful'][destDirectory]['SizeRemoved'] ) in [types.IntType, types.LongType] )
def test_getDirectory( self ):
print '\n\n#########################################################################\n\n\t\t\tGet directory test\n'
destDirectory = "%s/%s" % ( self.destDirectory, 'getDirectoryTest' )
# destDirectory = returnSingleResult( self.storageElement.getURL( directory ) )['Value']
# Create a local directory to upload
localDir = '/tmp/unit-test'
srcFile = '/etc/group'
sizeOfLocalFile = getSize( srcFile )
if not os.path.exists( localDir ):
os.mkdir( localDir )
for i in range( self.numberOfFiles ):
shutil.copy( srcFile, '%s/testFile.%s' % ( localDir, time.time() ) )
time.sleep( 1 )
# Check that we can successfully upload the directory to the storage element
dirDict = {destDirectory:localDir}
putDirRes = self.storageElement.putDirectory( dirDict )
    # Clean up the locally created directory before fetching the remote copy
    shutil.rmtree( localDir )
getDirRes = self.storageElement.getDirectory( destDirectory, localPath = localDir )
    # Now remove the remote directory
removeDirRes = self.storageElement.removeDirectory( destDirectory, recursive = True )
#Clean up the locally created directory
if os.path.exists( localDir ):
shutil.rmtree( localDir )
# Perform the checks for the put dir operation
self.assert_( putDirRes['OK'] )
self.assert_( putDirRes['Value'] )
for _dir in dirDict:
if putDirRes['Value']['Successful'][_dir]['Files']:
self.assertEqual( putDirRes['Value']['Successful'][_dir]['Files'], self.numberOfFiles )
self.assertEqual( putDirRes['Value']['Successful'][_dir]['Size'], self.numberOfFiles * sizeOfLocalFile )
self.assert_( type( putDirRes['Value']['Successful'][_dir]['Files'] ) in [types.IntType, types.LongType] )
self.assert_( type( putDirRes['Value']['Successful'][_dir]['Size'] ) in [types.IntType, types.LongType] )
# Perform the checks for the get directory operation
self.assert_( getDirRes['OK'] )
self.assert_( getDirRes['Value'] )
for _dir in dirDict:
if getDirRes['Value']['Successful'][_dir]['Files']:
self.assertEqual( getDirRes['Value']['Successful'][_dir]['Files'], self.numberOfFiles )
self.assertEqual( getDirRes['Value']['Successful'][_dir]['Size'], self.numberOfFiles * sizeOfLocalFile )
self.assert_( type( getDirRes['Value']['Successful'][_dir]['Files'] ) in [types.IntType, types.LongType] )
self.assert_( type( getDirRes['Value']['Successful'][_dir]['Size'] ) in [types.IntType, types.LongType] )
# Perform the checks for the remove directory operation
self.assert_( removeDirRes['OK'] )
self.assert_( removeDirRes['Value'] )
if removeDirRes['Value']['Successful'][destDirectory]['FilesRemoved']:
self.assertEqual( removeDirRes['Value']['Successful'][destDirectory]['FilesRemoved'], self.numberOfFiles )
self.assertEqual( removeDirRes['Value']['Successful'][destDirectory]['SizeRemoved'], self.numberOfFiles * sizeOfLocalFile )
self.assert_( type( removeDirRes['Value']['Successful'][destDirectory]['FilesRemoved'] ) in [types.IntType, types.LongType] )
self.assert_( type( removeDirRes['Value']['Successful'][destDirectory]['SizeRemoved'] ) in [types.IntType, types.LongType] )
if __name__ == '__main__':
gLogger.setLevel( "DEBUG" )
suite = unittest.defaultTestLoader.loadTestsFromTestCase( DirectoryTestCases )
suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( FileTestCases ) )
  suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( GetInfoTestCase ) )
testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
| gpl-3.0 | -2,148,420,408,907,544,000 | 53.349929 | 133 | 0.668159 | false |
gabrielelanaro/chemview | chemview/gg.py | 1 | 15416 | """GGplot like interface"""
import uuid
import matplotlib as mpl
import matplotlib.cm as cm
import numpy as np
from IPython.display import Image, display
from .utils import get_atom_color
from .widget import RepresentationViewer, TrajectoryControls
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def copy(self):
return type(self)(self)
class Aes(AttrDict):
def __init__(self, *args, **kwargs):
super(Aes, self).__init__(*args, **kwargs)
def __repr__(self):
return str(self.copy())
def updated(self, other):
copy = self.copy()
copy.update(other)
return copy
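# Illustrative example (added for clarity; not part of the original module):
# an Aes is a dict whose keys are also readable as attributes, and updated()
# returns a merged copy without mutating the original, e.g.
#
#     >>> a = Aes(xyz=[[0.0, 0.0, 0.0]], colors=0xff0000)
#     >>> a.xyz is a['xyz']
#     True
#     >>> a.updated(Aes(sizes=0.5)).sizes
#     0.5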
class ggview(object):
def __init__(self, aes=Aes()):
self.aes = aes
self.geometries = []
self.scales = []
def display(self):
# Generate primitives
aes = self.aes
        # Apply scales that map data to aes
for scale in self.scales:
aes = scale.apply(aes)
primitives = []
for geometry in self.geometries:
primitives.extend(geometry.produce(aes))
# We generate a json description
rv = RepresentationViewer.from_scene({"representations" : primitives})
for scale in self.scales:
scale.render(rv)
if 'xyz' in self.aes:
rv.autozoom(self.aes.xyz)
return rv
def _ipython_display_(self):
rv = self.display()
return rv._ipython_display_()
def __add__(self, other):
if isinstance(other, Geom):
self.geometries.append(other)
elif isinstance(other, Scale):
self.scales.append(other)
else:
raise ValueError("Data type not understood {}".format(type(other)))
return self
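# Illustrative usage sketch (added for clarity; not part of the original
# chemview API and never called by the library). `coords` is assumed to be an
# (N, 3) array of positions and `values` a sequence of N floats in [0, 1].
def _example_point_cloud(coords, values):
    # Compose a view: aesthetic mappings + a geometry + a color scale
    view = (ggview(Aes(xyz=coords, colors=list(values), sizes=0.3))
            + GeomPoints()
            + ScaleColorsGradient(limits=(0.0, 1.0), palette="YlGnBu"))
    return view.display()  # returns a RepresentationViewer widget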
class ggtraj(ggview):
def __init__(self, frames, aes=Aes()):
frame_aes = ggtraj._make_frame_aes(aes, 0)
super(ggtraj, self).__init__(frame_aes)
self.frames = frames
self.traj_aes = aes
self.update_funcs = []
def display(self):
# Generate primitives
aes = self.aes
        # Apply scales that map data to aes
        for scale in self.scales:
            aes = scale.apply(aes)
primitives = []
for geometry in self.geometries:
prims = geometry.produce(aes)
primitives.extend(prims)
self.update_funcs.append((prims[0]["rep_id"], geometry.update))
        rv = RepresentationViewer.from_scene({"representations" : primitives})
        # Render scale annotations (e.g. color bars) now that the widget exists
        for scale in self.scales:
            scale.render(rv)
tc = TrajectoryControls(self.frames)
tc.on_frame_change(lambda frame, self=self, widget=rv: self.update(widget, frame))
# Add trajectory viewer too
display(rv)
display(tc)
return tc, rv
@staticmethod
def _make_frame_aes(aes, frame):
frame_aes = Aes()
# Make a copy
for k in aes.keys():
frame_aes[k] = aes[k]
# Override the traj ones
for k in aes.keys():
if k.endswith("_traj"):
frame_aes[k[:-5]] = aes[k][frame]
return frame_aes
def update(self, widget, frame):
for rep_id, func in self.update_funcs:
aes = ggtraj._make_frame_aes(self.traj_aes, frame)
for scale in self.scales:
aes = scale.apply(aes)
options = func(aes)
widget.update_representation(rep_id, options)
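# Illustrative usage sketch (added for clarity; not part of the original
# chemview API and never called by the library). Per-frame data is passed with
# a "_traj" suffix and sliced on every frame change; `frames_xyz` is assumed
# to be an (n_frames, n_atoms, 3) array.
def _example_trajectory(frames_xyz):
    traj = ggtraj(len(frames_xyz), Aes(xyz_traj=frames_xyz)) + GeomPoints()
    # display() shows the widgets and returns (TrajectoryControls, RepresentationViewer)
    return traj.display()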
class Geom(object):
"""Base class for all geometric objects"""
def __init__(self, aes=Aes()):
self.aes = aes
def produce(self, aes=Aes()):
raise NotImplementedError()
def update(self, aes):
raise NotImplementedError()
class GeomPoints(Geom):
def produce(self, aes=Aes()):
# If an aes was passed, we override...
aes = aes.updated(self.aes)
        # Return a list of primitive descriptions built from the aes data
return [{
"rep_id" : uuid.uuid1().hex,
'rep_type': "points",
"options": { "coordinates": aes.xyz,
"colors": process_colors(len(aes.xyz), aes.get("colors", None)),
"sizes": process_sizes(len(aes.xyz), aes.get("sizes", 1)),
"visible": aes.get("visible", None) }
}]
def update(self, aes):
# we return options
return { "coordinates": aes.xyz,
"colors": process_colors(len(aes.xyz), aes.get("colors", None)),
"sizes": process_sizes(len(aes.xyz), aes.get("sizes", None)),
"visible": aes.get("visible", None) }
class GeomSpheres(Geom):
def produce(self, aes=Aes()):
# If an aes was passed, we override...
aes = aes.updated(self.aes)
        # Return a list of primitive descriptions built from the aes data
return [{
"rep_id" : uuid.uuid1().hex,
'rep_type': "spheres",
"options": { "coordinates": np.array(aes.xyz, dtype='float32'),
"colors": process_colors(len(aes.xyz), aes.get("colors", None)),
"radii": process_sizes(len(aes.xyz), aes.get("sizes", 1)),
"visible": aes.get("visible", None) }
}]
class GeomLines(Geom):
def produce(self, aes=Aes()):
        # Return a list of primitive descriptions built from the aes data
aes = aes.updated(self.aes)
xyz = np.array(aes.xyz)
edges = np.array(aes.edges, 'uint32')
colors = process_colors(len(aes.edges), aes.get("colors", None))
return [{ "rep_id" : uuid.uuid1().hex,
'rep_type': "lines",
"options" : {
"startCoords": np.take(xyz, edges[:, 0], axis=0),
"endCoords": np.take(xyz, edges[:, 1], axis=0),
"startColors": colors,
"endColors": colors}
}]
class GeomCylinders(Geom):
def produce(self, aes=Aes()):
        # Return a list of primitive descriptions built from the aes data
aes = aes.updated(self.aes)
xyz = np.array(aes.xyz)
edges = np.array(aes.edges, 'uint32')
colors = process_colors(len(edges), aes.get("colors", None))
return [{ "rep_id" : uuid.uuid1().hex,
'rep_type': "cylinders",
"options" : {
"startCoords": np.take(xyz, edges[:, 0], axis=0),
"endCoords": np.take(xyz, edges[:, 1], axis=0),
"colors": colors,
"radii": process_sizes(len(aes.edges), aes.get("sizes", None))}
}]
class GeomSurface(Geom):
def produce(self, aes=Aes()):
pass
from numpy.lib.stride_tricks import as_strided
def pairs(a):
"""Return array of pairs of adjacent elements in a.
>>> pairs([1, 2, 3, 4])
array([[1, 2],
[2, 3],
[3, 4]])
"""
a = np.asarray(a)
return as_strided(a, shape=(a.size - 1, 2), strides=a.strides * 2)
def groupby_ix(a):
p = pairs(a)
diff_ix = np.nonzero(p[:, 0] != p[:, 1])[0]
starts_ix = np.append(np.insert(diff_ix + 1, 0, 0), a.shape[0])
return pairs(starts_ix)
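# Illustrative example (added for clarity; not part of the original module):
# groupby_ix returns the [start, stop) index pair of every run of equal
# adjacent values, which is how GeomProteinCartoon splits a secondary
# structure sequence into helices, sheets and coils, e.g.
#
#     >>> groupby_ix(np.array(['H', 'H', 'H', 'C', 'C', 'E', 'E']))
#     array([[0, 3],
#            [3, 5],
#            [5, 7]])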
class GeomProteinCartoon(Geom):
def __init__(self, aes=Aes(), cmap=None):
super(GeomProteinCartoon, self).__init__(aes)
self.cmap = cmap or {'H': 0xff0000, 'E':0x00ffff, 'C':0xffffff}
    # The aes passed to produce() needs:
    # aes.xyz (coordinates)
    # aes.types (atom types, e.g. 'CA', 'C', 'O')
    # aes.secondary_type (secondary structure codes 'H', 'E', 'C')
    # aes.secondary_id (optional; derived from secondary_type when missing)
def produce(self, aes=Aes()):
aes = aes.updated(self.aes)
# Check if secondary_id is present, if not we generate a reasonable one
if not 'secondary_id' in aes:
pairs_ = groupby_ix(aes.secondary_type)
secondary_id = np.zeros_like(aes.secondary_type, dtype='int')
for k, (i,j) in enumerate(pairs_):
secondary_id[i:j] = k + 1
aes['secondary_id'] = secondary_id
aes['types'] = np.array(aes.types)
primitives = []
for xyz, normals in zip(*self._extract_helix_coords_normals(aes)):
g_helices = GeomRibbon(Aes(xyz=xyz, normals=normals, resolution=32),
color=self.cmap.get('H', 0xffffff))
primitives.extend(g_helices.produce(Aes()))
for xyz, normals in zip(*self._extract_sheet_coords_normals(aes)):
g_sheets = GeomRibbon(Aes(xyz=xyz, normals=normals, resolution=32),
arrow=True, color=self.cmap.get('E', 0xffffff))
primitives.extend(g_sheets.produce(Aes()))
for xyz in self._extract_coil_coords(aes):
g_coils = GeomTube(Aes(xyz=xyz), color=self.cmap.get('C', 0xffffff))
primitives.extend(g_coils.produce(Aes()))
return primitives
def _extract_helix_coords_normals(self, aes):
# First, extract the helices from the secondary
groups_ix = groupby_ix(aes.secondary_id)
helices_ix = groups_ix[aes.secondary_type[groups_ix[:, 0]] == 'H']
backbone_list = [aes.xyz[aes.types == 'CA'][i:j] for i, j in helices_ix if j - i]
normals_list = [alpha_helix_normals(backbone) for backbone in backbone_list]
return backbone_list, normals_list
def _extract_sheet_coords_normals(self, aes):
groups_ix = groupby_ix(aes.secondary_id)
sheets_ix = groups_ix[aes.secondary_type[groups_ix[:, 0]] == 'E']
ca_list = [aes.xyz[aes.types == 'CA'][i:j] for i, j in sheets_ix if j - i]
c_list = [aes.xyz[aes.types == 'C'][i:j] for i, j in sheets_ix if j - i]
o_list = [aes.xyz[aes.types == 'O'][i:j] for i, j in sheets_ix if j - i]
normals_list = [beta_sheet_normals(ca, c, o) for ca, c, o in zip(ca_list, c_list, o_list)]
return ca_list, normals_list
def _extract_coil_coords(self, aes):
groups_ix = groupby_ix(aes.secondary_id)
coils_ix = groups_ix[aes.secondary_type[groups_ix[:, 0]] == 'C']
# We remove id = 0 because they are heteroatoms
coils_id = aes.secondary_id[coils_ix[:, 0]]
coils_ix = coils_ix[coils_id != 0, :]
coils_ix[:, 1] += 1
coils_ix[:, 0] -= 1
coils_ix[coils_ix > len(aes.secondary_type)] = len(aes.secondary_type)
coils_ix[coils_ix < 0] = 0
backbone_list = [aes.xyz[aes.types == 'CA'][i:j] for i, j in coils_ix]
return backbone_list
from chemview.utils import normalized, beta_sheet_normals, alpha_helix_normals
class GeomRibbon(Geom):
def __init__(self, aes=Aes(), color=0xffffff, width=0.2, arrow=False):
super(GeomRibbon, self).__init__(aes)
self.color = color
self.width = width
self.arrow = arrow
def produce(self, aes=Aes()):
aes = aes.updated(self.aes)
xyz = np.array(aes.xyz)
normals = np.array(aes.normals)
return [{'rep_id': uuid.uuid1().hex,
'rep_type': 'ribbon',
'options': {
'coordinates': xyz,
'normals': normals,
'resolution': aes.get("resolution", 4),
'color': self.color,
'width': self.width,
'arrow': self.arrow
}}]
class GeomTube(Geom):
def __init__(self, aes=Aes(), color=0xffffff, radius=0.05, resolution=4):
super(GeomTube, self).__init__(aes)
self.color = color
self.radius = radius
        self.resolution = resolution
def produce(self, aes=Aes()):
aes = aes.updated(self.aes)
xyz = np.array(aes.xyz)
return [{'rep_id': uuid.uuid1().hex,
'rep_type': 'smoothtube',
'options': {
'coordinates': xyz,
'resolution': self.resolution,
'color': self.color,
'radius': self.radius
}}]
class Scale(object):
pass
class ScaleColorsGradient(Scale):
property = "colors"
def __init__(self, limits=None, palette="YlGnBu"):
self.limits = limits
self.palette = palette
def apply(self, aes):
aes = aes.copy()
colors = process_colors(len(aes.xyz), aes.get("colors", None), self.limits, self.palette)
aes.colors = colors
return aes
def render(self, widget):
import matplotlib as mpl
# Set the colormap and norm to correspond to the data for which
# the colorbar will be used.
cmap = mpl.cm.get_cmap(self.palette)
norm = mpl.colors.Normalize(vmin=self.limits[0], vmax=self.limits[1])
# Let's say we give 5 typical values
values = np.linspace(self.limits[0], self.limits[1], 5)
colors = [rgbfloat_to_hex(cmap(norm(v))) for v in values]
values = ["%.2f" % v for v in values]
widget._remote_call('addColorScale', colors=colors, values=values)
def rgbint_to_hex(rgb):
return (rgb[0] << 16) | (rgb[1] << 8) | rgb[2]
def rgbfloat_to_hex(rgb):
return (int(rgb[0] * 255) << 16) | (int(rgb[1]*255) << 8) | int(rgb[2] * 255)
def process_colors(size, colors, limits=None, palette="YlGnBu", cmap=None):
if colors is None:
return [0xffffff] * size
elif isinstance(colors, int):
return [colors] * size
elif isinstance(colors, list) and len(colors) == 0:
return [0xffffff] * size
elif isinstance(colors, list) and isinstance(colors[0], (str, bytes)):
return [get_atom_color(c) for c in colors]
elif isinstance(colors, list) and isinstance(colors[0], (int, np.int32, np.int64, np.int16)):
        # Convert numpy integer types to plain Python ints
return [int(c) for c in colors]
elif isinstance(colors, np.ndarray):
return process_colors(size, colors.tolist(), limits, palette)
elif isinstance(colors, list) and isinstance(colors[0], (float, np.float32, np.float64)):
if limits is None:
vmin = min(colors)
vmax = max(colors)
else:
vmin, vmax = limits
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
cmap = cm.get_cmap(palette)
m = cm.ScalarMappable(norm=norm, cmap=cmap)
return [rgbint_to_hex(c) for c in m.to_rgba(colors, bytes=True)[:, :3]]
else:
raise ValueError("Wrong color format : {}".format(type(colors)))
def process_sizes(size, sizes):
if sizes is None:
return [1.0] * size
if isinstance(sizes, (float, int)):
return [sizes] * size
elif isinstance(sizes, list) and len(sizes) == 0:
return [1.0] * size
elif isinstance(sizes, list) and isinstance(sizes[0], (int, float)):
return sizes
else:
raise ValueError("Wrong sizes format")
| lgpl-2.1 | 3,892,482,831,021,735,400 | 31.8 | 98 | 0.534315 | false |
ckolumbus/mikidown | mikidown/mikibook.py | 1 | 9402 | """
Notebook management module.
"""
import os
from PyQt4.QtCore import Qt, QDir, QFile, QSettings, QSize
from PyQt4.QtGui import (QAbstractItemDelegate, QAbstractItemView, QColor, QDialog, QDialogButtonBox, QFileDialog, QFont, QGridLayout, QLabel, QLineEdit, QListWidget, QListWidgetItem, QPen, QPushButton, QStyle)
import mikidown
from .config import Setting, readListFromSettings, writeListToSettings
class ListDelegate(QAbstractItemDelegate):
""" Customize view and behavior of notebook list """
def __init__(self, parent=None):
super(ListDelegate, self).__init__(parent)
def paint(self, painter, option, index):
r = option.rect
fontPen = QPen(QColor.fromRgb(51, 51, 51), 1, Qt.SolidLine)
if option.state & QStyle.State_Selected:
painter.setBrush(Qt.cyan)
painter.drawRect(r)
else:
painter.setBrush(
Qt.white if (index.row() % 2) == 0 else QColor(252, 252, 252))
painter.drawRect(r)
painter.setPen(fontPen)
name = index.data(Qt.DisplayRole)
path = index.data(Qt.UserRole)
imageSpace = 10
# notebook name
r = option.rect.adjusted(imageSpace, 0, -10, -20)
painter.setFont(QFont('Lucida Grande', 10, QFont.Bold))
        painter.drawText(
            r.left(), r.top(), r.width(), r.height(),
            Qt.AlignBottom | Qt.AlignLeft, name)
# notebook path
r = option.rect.adjusted(imageSpace, 20, -10, 0)
painter.setFont(QFont('Lucida Grande', 8, QFont.Normal))
painter.drawText(
r.left(), r.top(), r.width(), r.height(), Qt.AlignLeft, path)
def sizeHint(self, option, index):
return QSize(200, 40)
class NotebookListDialog(QDialog):
""" Funtions to display, create, remove, modify notebookList """
def __init__(self, parent=None):
super(NotebookListDialog, self).__init__(parent)
self.notebookList = QListWidget()
self.moveUp = QPushButton('<<')
self.moveDown = QPushButton('>>')
self.add = QPushButton('Add')
self.remove = QPushButton('Remove')
self.buttonBox = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
layout = QGridLayout()
layout.addWidget(self.notebookList, 0, 0, 4, 6)
layout.addWidget(self.moveUp, 1, 6)
layout.addWidget(self.moveDown, 2, 6)
layout.addWidget(self.add, 4, 0)
layout.addWidget(self.remove, 4, 1)
layout.addWidget(self.buttonBox, 4, 5, 1, 2)
self.setLayout(layout)
self.notebookList.setItemDelegate(ListDelegate(self.notebookList))
self.notebookList.currentRowChanged.connect(self.updateUi)
self.add.clicked.connect(self.actionAdd)
self.remove.clicked.connect(self.actionRemove)
self.moveUp.clicked.connect(self.moveItemUp)
self.moveDown.clicked.connect(self.moveItemDown)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
self.initList()
def initList(self):
self.notebookList.clear()
notebooks = Mikibook.read()
for nb in notebooks:
item = QListWidgetItem()
item.setData(Qt.DisplayRole, nb[0])
item.setData(Qt.UserRole, nb[1])
self.notebookList.addItem(item)
self.updateUi(len(notebooks) != 0)
self.notebookList.setCurrentRow(0)
# QListWidgetItem(nb, self.notebookList)
def updateUi(self, row):
flag = (row != -1)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(flag)
self.remove.setEnabled(flag)
self.moveUp.setEnabled(flag)
self.moveDown.setEnabled(flag)
def actionAdd(self):
Mikibook.create()
self.initList()
count = self.notebookList.count()
self.notebookList.setCurrentRow(count-1)
def actionRemove(self):
item = self.notebookList.currentItem()
row = self.notebookList.currentRow()
name = item.data(Qt.DisplayRole)
path = item.data(Qt.UserRole)
self.notebookList.takeItem(row)
Mikibook.remove(name, path)
def moveItemUp(self):
item = self.notebookList.currentItem()
row = self.notebookList.currentRow()
if row != 0:
# self.notebookList.removeItemWidget(item)
self.notebookList.takeItem(row)
self.notebookList.insertItem(row-1, item)
self.notebookList.setCurrentRow(row-1)
def moveItemDown(self):
item = self.notebookList.currentItem()
row = self.notebookList.currentRow()
count = self.notebookList.count()
if row != count-1:
self.notebookList.takeItem(row)
self.notebookList.insertItem(row+1, item)
self.notebookList.setCurrentRow(row+1)
def accept(self):
notebookPath = self.notebookList.currentItem().data(Qt.UserRole)
notebookName = self.notebookList.currentItem().data(Qt.DisplayRole)
settings = Setting([[notebookName, notebookPath]])
window = mikidown.MikiWindow(settings)
window.show()
count = self.notebookList.count()
notebooks = []
for i in range(count):
name = self.notebookList.item(i).data(Qt.DisplayRole)
path = self.notebookList.item(i).data(Qt.UserRole)
notebooks.append([name, path])
Mikibook.write(notebooks)
QDialog.accept(self)
class NewNotebookDlg(QDialog):
def __init__(self, parent=None):
super(NewNotebookDlg, self).__init__(parent)
self.setWindowTitle('Add Notebook - mikidown')
tipLabel = QLabel('Choose a name and folder for your notebook.' +
'\nThe folder can be an existing notebook folder.')
self.nameEditor = QLineEdit()
self.nameEditor.setText('Notes')
nameLabel = QLabel('Name:')
nameLabel.setBuddy(self.nameEditor)
self.pathEditor = QLineEdit()
# self.pathEditor.setText('~/mikidown')
self.pathEditor.setText(os.environ['HOME']+'/mikinotes')
pathLabel = QLabel('Path:')
pathLabel.setBuddy(self.pathEditor)
browse = QPushButton('Browse')
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
grid = QGridLayout()
grid.setRowMinimumHeight(1, 10)
grid.setRowMinimumHeight(4, 10)
grid.addWidget(tipLabel, 0, 0, 1, 4)
grid.addWidget(nameLabel, 2, 0)
grid.addWidget(self.nameEditor, 2, 1, 1, 4)
grid.addWidget(pathLabel, 3, 0)
grid.addWidget(self.pathEditor, 3, 1, 1, 4)
grid.addWidget(browse, 3, 5)
grid.addWidget(buttonBox, 5, 4, 1, 2)
self.setLayout(grid)
browse.clicked.connect(self.browse)
buttonBox.accepted.connect(self.accept)
buttonBox.rejected.connect(self.reject)
def browse(self):
default = os.environ['HOME']
path = QFileDialog.getExistingDirectory(self,
"Select Folder",
default,
QFileDialog.ShowDirsOnly)
self.pathEditor.setText(path)
def closeEvent(self, event):
event.accept()
class Mikibook():
# ~/.config/mikidown/mikidown.conf
settings = QSettings('mikidown', 'mikidown')
@staticmethod
def read():
""" Read notebook list from config file """
return readListFromSettings(Mikibook.settings, 'notebookList')
@staticmethod
def write(notebooks):
""" Write notebook list to config file """
return writeListToSettings(
Mikibook.settings, 'notebookList', notebooks)
@staticmethod
def create():
""" Display a dialog to set notebookName and notebookPath """
newNotebook = NewNotebookDlg()
if newNotebook.exec_():
notebookName = newNotebook.nameEditor.text()
notebookPath = newNotebook.pathEditor.text()
Mikibook.initialise(notebookName, notebookPath)
notebooks = Mikibook.read()
notebooks.append([notebookName, notebookPath])
# TODO: make mikidown.conf become plain text
Mikibook.write(notebooks)
@staticmethod
def initialise(notebookName, notebookPath):
""" Called by create()
A notebook directory will be initialised to:
css/ notebook.conf notes/
"""
# QDir().mkpath will create all necessary parent directories
QDir().mkpath(os.path.join(notebookPath, "notes"))
QDir().mkpath(os.path.join(notebookPath, "css"))
cssFile = os.path.join(notebookPath, "css", "notebook.css")
cssTemplate = "/usr/share/mikidown/notebook.css"
if not os.path.exists(cssTemplate):
cssTemplate = os.path.join(
os.path.dirname(__file__), "notebook.css")
# If //cssFile// already exists, copy() returns false!
QFile.copy(cssTemplate, cssFile)
@staticmethod
def remove(name, path):
notebooks = Mikibook.read()
notebooks.remove([name, path])
Mikibook.write(notebooks)
| mit | -3,307,863,633,545,682,000 | 35.726563 | 210 | 0.618379 | false |
suutari-ai/shoop | shuup_tests/admin/test_product_package.py | 3 | 2906 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from django.forms import formset_factory
from shuup.admin.modules.products.forms import (
PackageChildForm, PackageChildFormSet
)
from shuup.admin.modules.products.utils import clear_existing_package
from shuup.core.models import ProductMode
from shuup.testing.factories import create_product
from shuup.utils.excs import Problem
from shuup_tests.utils import printable_gibberish
from shuup_tests.utils.forms import get_form_data
@pytest.mark.django_db
def test_package_child_formset():
FormSet = formset_factory(PackageChildForm, PackageChildFormSet, extra=5, can_delete=True)
parent = create_product(printable_gibberish())
child = create_product(printable_gibberish())
# No products in the package
formset = FormSet(parent_product=parent)
assert formset.initial_form_count() == 0 # No children yet
assert not parent.get_all_package_children()
data = dict(get_form_data(formset, True), **{"form-0-child": child.pk, "form-0-quantity": 2})
formset = FormSet(parent_product=parent, data=data)
formset.save()
assert parent.get_all_package_children()
clear_existing_package(parent)
assert not parent.get_all_package_children()
@pytest.mark.django_db
def test_product_not_in_normal_mode():
FormSet = formset_factory(PackageChildForm, PackageChildFormSet, extra=5, can_delete=True)
parent = create_product(printable_gibberish())
child_1 = create_product(printable_gibberish())
child_1.link_to_parent(parent)
child_2 = create_product(printable_gibberish())
parent.verify_mode()
assert parent.mode == ProductMode.SIMPLE_VARIATION_PARENT
# Trying to create a package from a non-normal mode product
with pytest.raises(Problem):
formset = FormSet(parent_product=parent)
data = dict(get_form_data(formset, True), **{"form-0-child": child_2.pk, "form-0-quantity": 2})
formset = FormSet(parent_product=parent, data=data)
formset.save()
@pytest.mark.django_db
def test_cannot_add_product_to_own_package(rf):
FormSet = formset_factory(PackageChildForm, PackageChildFormSet, extra=5, can_delete=True)
parent = create_product(printable_gibberish())
# No products in the package
formset = FormSet(parent_product=parent)
assert formset.initial_form_count() == 0 # No children yet
assert not parent.get_all_package_children()
# Try to add a product to its own package
data = dict(get_form_data(formset, True), **{"form-0-child": parent.pk, "form-0-quantity": 2})
formset = FormSet(parent_product=parent, data=data)
formset.save()
assert not parent.get_all_package_children()
| agpl-3.0 | -6,532,020,556,294,720,000 | 36.74026 | 103 | 0.726772 | false |
biggihs/python-pptx | features/steps/axis.py | 1 | 10854 | # encoding: utf-8
"""Gherkin step implementations for chart axis features."""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from behave import given, then, when
from pptx import Presentation
from pptx.enum.chart import XL_AXIS_CROSSES, XL_CATEGORY_TYPE
from helpers import test_pptx
# given ===================================================
@given('a {axis_type} axis')
def given_a_axis_type_axis(context, axis_type):
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[0].shapes[0].chart
context.axis = {
'category': chart.category_axis,
'value': chart.value_axis,
}[axis_type]
@given('a major gridlines')
def given_a_major_gridlines(context):
prs = Presentation(test_pptx('cht-gridlines-props'))
axis = prs.slides[0].shapes[0].chart.value_axis
context.gridlines = axis.major_gridlines
@given('a value axis having category axis crossing of {crossing}')
def given_a_value_axis_having_cat_ax_crossing_of(context, crossing):
slide_idx = {
'automatic': 0,
'maximum': 2,
'minimum': 3,
'2.75': 4,
'-1.5': 5,
}[crossing]
prs = Presentation(test_pptx('cht-axis-props'))
context.value_axis = prs.slides[slide_idx].shapes[0].chart.value_axis
@given('an axis')
def given_an_axis(context):
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[0].shapes[0].chart
context.axis = chart.value_axis
@given('an axis having {a_or_no} title')
def given_an_axis_having_a_or_no_title(context, a_or_no):
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[7].shapes[0].chart
context.axis = {
'a': chart.value_axis,
'no': chart.category_axis,
}[a_or_no]
@given('an axis having {major_or_minor} gridlines')
def given_an_axis_having_major_or_minor_gridlines(context, major_or_minor):
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[0].shapes[0].chart
context.axis = chart.value_axis
@given('an axis having {major_or_minor} unit of {value}')
def given_an_axis_having_major_or_minor_unit_of_value(
context, major_or_minor, value):
slide_idx = 0 if value == 'Auto' else 1
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[slide_idx].shapes[0].chart
context.axis = chart.value_axis
@given('an axis of type {cls_name}')
def given_an_axis_of_type_cls_name(context, cls_name):
slide_idx = {
'CategoryAxis': 0,
'DateAxis': 6,
}[cls_name]
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[slide_idx].shapes[0].chart
context.axis = chart.category_axis
@given('an axis not having {major_or_minor} gridlines')
def given_an_axis_not_having_major_or_minor_gridlines(context, major_or_minor):
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[0].shapes[0].chart
context.axis = chart.category_axis
@given('an axis title')
def given_an_axis_title(context):
prs = Presentation(test_pptx('cht-axis-props'))
context.axis_title = prs.slides[7].shapes[0].chart.value_axis.axis_title
@given('an axis title having {a_or_no} text frame')
def given_an_axis_title_having_a_or_no_text_frame(context, a_or_no):
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[7].shapes[0].chart
axis = {
'a': chart.value_axis,
'no': chart.category_axis,
}[a_or_no]
context.axis_title = axis.axis_title
@given('tick labels having an offset of {setting}')
def given_tick_labels_having_an_offset_of_setting(context, setting):
slide_idx = {
'no explicit setting': 0,
'420': 1,
}[setting]
prs = Presentation(test_pptx('cht-ticklabels-props'))
chart = prs.slides[slide_idx].shapes[0].chart
context.tick_labels = chart.category_axis.tick_labels
# when ====================================================
@when('I assign {value} to axis.has_title')
def when_I_assign_value_to_axis_has_title(context, value):
context.axis.has_title = {'True': True, 'False': False}[value]
@when('I assign {value} to axis.has_{major_or_minor}_gridlines')
def when_I_assign_value_to_axis_has_major_or_minor_gridlines(
context, value, major_or_minor):
axis = context.axis
propname = 'has_%s_gridlines' % major_or_minor
new_value = {'True': True, 'False': False}[value]
setattr(axis, propname, new_value)
@when('I assign {value} to axis.{major_or_minor}_unit')
def when_I_assign_value_to_axis_major_or_minor_unit(
context, value, major_or_minor):
axis = context.axis
propname = '%s_unit' % major_or_minor
new_value = {'8.4': 8.4, '5': 5, 'None': None}[value]
setattr(axis, propname, new_value)
@when('I assign {value} to axis_title.has_text_frame')
def when_I_assign_value_to_axis_title_has_text_frame(context, value):
context.axis_title.has_text_frame = {'True': True, 'False': False}[value]
@when('I assign {value} to tick_labels.offset')
def when_I_assign_value_to_tick_labels_offset(context, value):
new_value = int(value)
context.tick_labels.offset = new_value
@when('I assign {member} to value_axis.crosses')
def when_I_assign_member_to_value_axis_crosses(context, member):
value_axis = context.value_axis
value_axis.crosses = getattr(XL_AXIS_CROSSES, member)
@when('I assign {value} to value_axis.crosses_at')
def when_I_assign_value_to_value_axis_crosses_at(context, value):
new_value = None if value == 'None' else float(value)
context.value_axis.crosses_at = new_value
# then ====================================================
@then('axis.axis_title is an AxisTitle object')
def then_axis_axis_title_is_an_AxisTitle_object(context):
class_name = type(context.axis.axis_title).__name__
assert class_name == 'AxisTitle', 'got %s' % class_name
@then('axis.category_type is XL_CATEGORY_TYPE.{member}')
def then_axis_category_type_is_XL_CATEGORY_TYPE_member(context, member):
expected_value = getattr(XL_CATEGORY_TYPE, member)
category_type = context.axis.category_type
assert category_type is expected_value, 'got %s' % category_type
@then('axis.format is a ChartFormat object')
def then_axis_format_is_a_ChartFormat_object(context):
axis = context.axis
assert type(axis.format).__name__ == 'ChartFormat'
@then('axis.format.fill is a FillFormat object')
def then_axis_format_fill_is_a_FillFormat_object(context):
axis = context.axis
assert type(axis.format.fill).__name__ == 'FillFormat'
@then('axis.format.line is a LineFormat object')
def then_axis_format_line_is_a_LineFormat_object(context):
axis = context.axis
assert type(axis.format.line).__name__ == 'LineFormat'
@then('axis.has_title is {value}')
def then_axis_has_title_is_value(context, value):
axis = context.axis
actual_value = axis.has_title
expected_value = {'True': True, 'False': False}[value]
assert actual_value is expected_value, 'got %s' % actual_value
@then('axis.has_{major_or_minor}_gridlines is {value}')
def then_axis_has_major_or_minor_gridlines_is_expected_value(
context, major_or_minor, value):
axis = context.axis
actual_value = {
'major': axis.has_major_gridlines,
'minor': axis.has_minor_gridlines,
}[major_or_minor]
expected_value = {'True': True, 'False': False}[value]
assert actual_value is expected_value, 'got %s' % actual_value
@then('axis.major_gridlines is a MajorGridlines object')
def then_axis_major_gridlines_is_a_MajorGridlines_object(context):
axis = context.axis
assert type(axis.major_gridlines).__name__ == 'MajorGridlines'
@then('axis.{major_or_minor}_unit is {value}')
def then_axis_major_or_minor_unit_is_value(context, major_or_minor, value):
axis = context.axis
propname = '%s_unit' % major_or_minor
actual_value = getattr(axis, propname)
expected_value = {
'20.0': 20.0, '8.4': 8.4, '5.0': 5.0, '4.2': 4.2, 'None': None
}[value]
assert actual_value == expected_value, 'got %s' % actual_value
@then('axis_title.format is a ChartFormat object')
def then_axis_title_format_is_a_ChartFormat_object(context):
class_name = type(context.axis_title.format).__name__
assert class_name == 'ChartFormat', 'got %s' % class_name
@then('axis_title.format.fill is a FillFormat object')
def then_axis_title_format_fill_is_a_FillFormat_object(context):
class_name = type(context.axis_title.format.fill).__name__
assert class_name == 'FillFormat', 'got %s' % class_name
@then('axis_title.format.line is a LineFormat object')
def then_axis_title_format_line_is_a_LineFormat_object(context):
class_name = type(context.axis_title.format.line).__name__
assert class_name == 'LineFormat', 'got %s' % class_name
@then('axis_title.has_text_frame is {value}')
def then_axis_title_has_text_frame_is_value(context, value):
actual_value = context.axis_title.has_text_frame
expected_value = {'True': True, 'False': False}[value]
assert actual_value is expected_value, 'got %s' % actual_value
@then('axis_title.text_frame is a TextFrame object')
def then_axis_title_text_frame_is_a_TextFrame_object(context):
class_name = type(context.axis_title.text_frame).__name__
assert class_name == 'TextFrame', 'got %s' % class_name
@then('gridlines.format is a ChartFormat object')
def then_gridlines_format_is_a_ChartFormat_object(context):
gridlines = context.gridlines
assert type(gridlines.format).__name__ == 'ChartFormat'
@then('gridlines.format.fill is a FillFormat object')
def then_gridlines_format_fill_is_a_FillFormat_object(context):
gridlines = context.gridlines
assert type(gridlines.format.fill).__name__ == 'FillFormat'
@then('gridlines.format.line is a LineFormat object')
def then_gridlines_format_line_is_a_LineFormat_object(context):
gridlines = context.gridlines
assert type(gridlines.format.line).__name__ == 'LineFormat'
@then('tick_labels.offset is {value}')
def then_tick_labels_offset_is_expected_value(context, value):
expected_value = int(value)
tick_labels = context.tick_labels
assert tick_labels.offset == expected_value, (
'got %s' % tick_labels.offset
)
@then('value_axis.crosses is {member}')
def then_value_axis_crosses_is_value(context, member):
value_axis = context.value_axis
expected_value = getattr(XL_AXIS_CROSSES, member)
assert value_axis.crosses == expected_value, 'got %s' % value_axis.crosses
@then('value_axis.crosses_at is {value}')
def then_value_axis_crosses_at_is_value(context, value):
value_axis = context.value_axis
expected_value = None if value == 'None' else float(value)
assert value_axis.crosses_at == expected_value, (
'got %s' % value_axis.crosses_at
)
| mit | -7,218,842,915,262,109,000 | 33.239748 | 79 | 0.669707 | false |
django-girls/best-blog-in-the-world | blog/migrations/0001_initial.py | 1 | 1051 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-16 20:40
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| mit | 6,504,924,610,036,035,000 | 32.903226 | 120 | 0.624167 | false |
amasiero/approach_control | approach_control_manipulator/nodes/approach_control_manipulator/GoTo.py | 1 | 6869 | #!/usr/bin/env python
import smach
import rospy
import math
import numpy as np
from std_msgs.msg import Float64
from dynamixel_msgs.msg import JointState
from dynamixel_controllers.srv import SetSpeed
from approach_control_manipulator.utils.arm_mod import Arm3Link
class GoTo(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes = ['success', 'in_progress','fail'], input_keys = ['coordX', 'coordY', 'coordZ'])
self.joint_position = dict()
self.count = 0
self.error_default = 0.04 # error for joint angles
        self.diff_vel = [0, 0, 0] # list to receive difference between the last and the next angle values
self.speed = [0, 0, 0,] # list to receive speeds
self.velocity_limit = 0.8 # max velocity = 2.0
self.default_joint_speed = 0.3
self.angle = np.array([math.pi / 2, -math.pi / 2, 0, math.pi /2 ])
self.angles_now = self.angle
self.servo_speed = dict()
self.controllers = ['tilt2_controller', 'tilt3_controller', 'tilt4_controller']
#Publishers
self.joint1 = rospy.Publisher('/tilt2_controller/command', Float64, queue_size = 1) # Joint 1
self.joint2 = rospy.Publisher('/tilt3_controller/command', Float64, queue_size = 1) # Joint 2
self.joint3 = rospy.Publisher('/tilt4_controller/command', Float64, queue_size = 1) # Joint 3
self.joint4 = rospy.Publisher('/tilt5_controller/command', Float64, queue_size = 1) # Wrist
self.base = rospy.Publisher('/tilt_controller/command', Float64, queue_size = 1) # Base
rospy.Rate(5)
def callback_joint1(self, data):
self.pos_joint1 = data.current_pos
self.error_joint1 = data.error
self.moving_joint1 = data.is_moving
def callback_joint2(self, data):
self.pos_joint2 = data.current_pos
self.error_joint2 = data.error
self.moving_joint2 = data.is_moving
def callback_joint3(self, data):
self.pos_joint3 = data.current_pos
self.error_joint3 = data.error
self.moving_joint3 = data.is_moving
def execute(self, userdata):
rospy.loginfo('Moving to point')
rospy.sleep(0.3)
# Subscribers
rospy.Subscriber('/tilt2_controller/state', JointState, self.callback_joint1, queue_size = 1)
rospy.Subscriber('/tilt3_controller/state', JointState, self.callback_joint2, queue_size = 1)
rospy.Subscriber('/tilt4_controller/state', JointState, self.callback_joint3, queue_size = 1)
# Making and calling joint speed services with a default speed
for controller in (self.controllers):
set_speed_service = '/' + controller + '/set_speed'
rospy.wait_for_service(set_speed_service)
self.servo_speed[controller] = rospy.ServiceProxy(set_speed_service, SetSpeed, persistent = True)
self.servo_speed[controller](self.default_joint_speed)
# Current position
self.current_pose_joint1 = self.pos_joint1
self.current_pose_joint2 = self.pos_joint2
self.current_pose_joint3 = self.pos_joint3
self.angles_now[0] = np.round(self.current_pose_joint1 - (1.98), 2)
self.angles_now[1] = np.round(self.current_pose_joint2 + (0.41), 2)
self.angles_now[2] = np.round(self.current_pose_joint3 + (0.46), 2)
# Create an Arm3Link
arm = Arm3Link(q0 = self.angle, L = np.array([130,133,225]))
arm.q = arm.inv_kin(xyz = [userdata.coordX, userdata.coordY, userdata.coordZ])
if not math.isnan(arm.q[0]):
            # Transformations for interaction with Dynamixel servos
q1 = np.round(arm.q[0] - (1.92), 2)
q2 = np.round(arm.q[1] + (0.41), 2)
q3 = np.round(arm.q[2] + (0.46), 2)
q4 = np.round(arm.q[3] - (0.71), 2)
self.q_list = [q1, q2, q3]
# Vector with joint difference angles
self.diff_vel[0] = abs(abs(self.current_pose_joint1) - abs(q1))
self.diff_vel[1] = abs(abs(self.current_pose_joint2) - abs(q2))
self.diff_vel[2] = abs(abs(self.current_pose_joint3) - abs(q3))
# Sorting differences list
for x in range(len(self.diff_vel) - 1, 0, -1):
for i in range(x):
if self.diff_vel[i] > self.diff_vel[i + 1]:
temp = self.diff_vel[i]
temp_aux = self.controllers[i]
temp_aux2 = self.q_list[i]
self.diff_vel[i] = self.diff_vel[i + 1]
self.controllers[i] = self.controllers[i + 1]
self.q_list[i] = self.q_list[i + 1]
self.diff_vel[i + 1] = temp
self.controllers[i + 1] = temp_aux
self.q_list[i + 1] = temp_aux2
# Making the proportion values of speeds
self.speeds = np.round([(((self.diff_vel[0] * 100) / self.diff_vel[2]) / 100) \
                * self.velocity_limit, (((self.diff_vel[1] * 100) / self.diff_vel[2]) / 100) \
                * self.velocity_limit, self.velocity_limit], 3)
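            # Illustrative note (hypothetical numbers, not from the original code):
            # with diff_vel sorted to [0.2, 0.5, 1.0] and velocity_limit = 0.8 the
            # expression above yields speeds of roughly [0.16, 0.4, 0.8], so the
            # joint with the largest travel moves at the limit and the others are
            # slowed proportionally to finish at about the same time.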
# Calling services and publishing joint values
for i in range(len(self.speeds)):
if self.speeds[i] == 0.0:
self.speeds[i] = 0.1
self.servo_speed[self.controllers[i]](self.speeds[i])
rospy.loginfo("\nSPEEDS: %s Joint: %s Diff: %s \n", str(self.speeds[i]), \
                    str(self.controllers[i]), str(self.diff_vel[i]))
# Publishing joint values
rospy.loginfo("\n\nQ LIST: %s %s %s %s \n\n", str(q1), str(q2), str(q3), str(q4))
self.joint1.publish(q1)
self.joint2.publish(q2)
self.joint3.publish(q3)
self.base.publish(q4)
rospy.sleep(3)
            while (self.moving_joint1 and self.moving_joint2 and self.moving_joint3):
rospy.loginfo('Moving to point')
# Errors
rospy.loginfo('\n Error joint1: %f \n Error joint2: %f \n Error joint3: %f', np.absolute(self.error_joint1), \
np.absolute(self.error_joint2), np.absolute(self.error_joint3))
if np.absolute(self.error_joint1) < self.error_default and np.absolute(self.error_joint2) < self.error_default \
and np.absolute(self.error_joint3) < self.error_default:
rospy.loginfo('Goal reached')
self.count = 0
return 'success'
elif self.count < 1:
rospy.loginfo('Sending Goal Again')
self.count += 1
rospy.sleep(0.1)
return 'in_progress'
elif (math.isnan(arm.q[0])):
rospy.logerr("\n\n\nNAN VALUE ERROR !!!\n\n\n")
return 'fail'
else:
return 'fail' | gpl-2.0 | -2,624,966,592,184,134,000 | 44.197368 | 125 | 0.575921 | false |
pielco11/SSMA | ssma.py | 1 | 28667 | #!/usr/bin/env python3
"""
@author: Lasha Khasaia
@license: GNU General Public License 3.0
@contact: @_qaz_qaz
@Description: SSMA - Simple Static Malware Analyzer
"""
import argparse, os, json, sys
import shutil, magic, uuid
import hashlib, contextlib
from elasticsearch import Elasticsearch
from src import colors
from src.blacklisted_domain_ip import ransomware_and_malware_domain_check
from src.check import is_malware, is_file_packed, check_crypto, is_antidb_antivm, is_malicious_document, is_your_target
from src.check_file import PEScanner, ELFScanner, file_info
from src.check_updates import check_internet_connection, download_yara_rules_git
from src.check_virustotal import virustotal
from src.file_strings import get_strings
from src.mass_analysis import start_scan
from src.report import pe_report, elf_report, others_report
from src import markdown
####### NEED THIS FOR ELASTICSEARCH
@contextlib.contextmanager
def nostderr():
savestderr = sys.stderr
    sys.stderr = open(os.devnull, 'w')
try:
yield
finally:
sys.stderr = savestderr
#####################################
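# Illustrative sketch of how the helper above is meant to be used (mirrors the
# Elasticsearch calls near the end of this script; host names are placeholders):
#
#   with nostderr():
#       es = Elasticsearch(["elasticsearch", "127.0.0.1"])
#       es.info()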
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Simple Static Malware Analyzer")
parser.add_argument("filename", help="/path/to/file")
parser.add_argument("-k", "--api-key", help="Virustotal API key")
parser.add_argument("-d", "--document", help="check document/MS Office file", action="store_true")
parser.add_argument("-u", "--update", help="Update Yara-Rules (yes/no) usage ./ssma.py sample.exe -u yes")
parser.add_argument("-y", "--yara", help="Scan file with your Yara-Rule")
parser.add_argument("-D", "--directory", help="Mass analysis from a dir ./ssma.py (/path/.) period at end of path is necessary")
parser.add_argument("-r", "--report", help="Generate json format report (yes/no/elasticsearch) usage ./ssma.py sample.exe -r yes")
parser.add_argument("-t", "--table", help="Markdown output", action="store_true")
parser.add_argument("-s", "--strings", help="Extract strings", action="store_true")
args = parser.parse_args()
if args.report == "elasticsearch":
args.report = "output"
else:
pass
# Added by Yang
if args.directory:
start_scan(args)
exit()
elif args.directory and args.filename:
print(colors.BOLD + colors.RED + "option error, please select a file or directory, run ssma.py -h")
exit()
if args.report == "output":
pass
else:
print(colors.CYAN + """
███████╗███████╗███╗ ███╗ █████╗
██╔════╝██╔════╝████╗ ████║██╔══██╗ Simple
███████╗███████╗██╔████╔██║███████║ Static
╚════██║╚════██║██║╚██╔╝██║██╔══██║ Malware
███████║███████║██║ ╚═╝ ██║██║ ██║ Analyzer
╚══════╝╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝
""" + colors.RESET)
if args.update == "yes":
if os.path.exists("rules"):
shutil.rmtree("rules")
if os.path.exists("rules_compiled"):
shutil.rmtree("rules_compiled")
os.mkdir("rules_compiled")
print(colors.BOLD + colors.CYAN + "[-] Updating Yara-Rules..." + colors.RESET)
download_yara_rules_git()
print(colors.BOLD + colors.GREEN + "[+] Updated for Yara-Rules!" + colors.RESET)
print()
if not args.filename:
exit()
else:
pass
else:
pass
try:
os.path.realpath(args.filename)
except:
try:
os.path.realpath(args.directory)
except:
print(colors.BOLD + colors.RED + "No option selected, run ssma.py -h" + colors.RESET)
exit()
internet_connection = check_internet_connection()
py_file_location = os.path.dirname(__file__)
args.filename = os.path.realpath(args.filename)
if py_file_location:
os.chdir(py_file_location)
filetype = magic.from_file(args.filename, mime=True)
if filetype == 'application/x-dosexec':
pe = PEScanner(filename=args.filename)
if args.report == "output":
pass
else:
print(colors.BOLD + colors.YELLOW + "File Details: " + colors.RESET)
for n in pe.file_info(args.report, False):
if args.report == "output":
pass
else:
print('\t', n)
if args.report == "output":
pass
else:
print()
print("================================================================================")
if args.report:
if not os.path.exists("analysis_report"):
os.mkdir("analysis_report")
file_report = pe_report(pe, args.report, args.strings)
else:
sections = pe.sections_analysis(args.report)
print("================================================================================")
pe.overlay()
if args.report == "output":
pass
else:
print("================================================================================")
_tls = pe.checkTSL()
if _tls is not None:
if args.report == "output":
pass
else:
print(colors.RED + "The executable contains a .tls section.\n" + colors.RESET + "A TLS callback can be used to execute code before the entry point \
and therefore run stealthily before a debugger breaks at the entry point.")
print("================================================================================")
check_file_header = pe.check_file_header(args.report)
continue_message = False
if check_file_header["debug"]:
continue_message = True
print( # MAYBE A DUPLICATE WITH "check_file.py" #323 ?
                colors.LIGHT_RED + "The file contains debug information; the majority of regular PE files should not "
                "contain debug information" + colors.RESET + "\n")
if any(tr[1] for tr in check_file_header["flags"]):
continue_message = True
if args.report == "output":
pass
else:
print(colors.LIGHT_RED + "Suspicious flags in the characteristics of the PE file: " + colors.RESET)
for n in check_file_header["flags"]:
if n[1]:
print(colors.RED + n[0] + colors.RESET + " flag is set - {}".format(n[2]))
print()
if args.report == "output":
pass
else:
if continue_message:
print("================================================================================")
check_date_result = pe.check_date(False)
if check_date_result:
if args.report == "output":
pass
else:
print(check_date_result)
print()
print("================================================================================")
check_imports_result = pe.check_imports()
if args.report == "output":
pass
else:
if check_imports_result:
print(
                    colors.BOLD + colors.YELLOW + "This file imports Windows functions commonly used by malware.\nFor more information see the Microsoft documentation.\n" + colors.RESET)
for n in check_imports_result:
n = n.split("^")
print('\t' + colors.LIGHT_RED + n[0] + colors.RESET + " - " + n[1])
print()
print("================================================================================")
# ELF file -> Linux malware
# Added by Yang
elif filetype == 'application/x-executable':
elf = ELFScanner(filename=args.filename)
if args.report == "output":
pass
else:
print(colors.BOLD + colors.YELLOW + "File Details: " + colors.RESET)
for n in elf.file_info(args.report):
if args.report == "output":
print('\t', n)
else:
print('\t', n)
if args.report == "output":
pass
else:
print()
print("================================================================================")
depends = elf.dependencies()
if depends:
if args.report == "output":
pass
else:
print(colors.BOLD + colors.YELLOW + "Dependencies: " + colors.RESET)
for line in depends:
line = line.decode('utf-8', 'ignore').replace("\n", "")
print(line)
print()
print("================================================================================")
prog_header = elf.program_header()
if prog_header:
if args.report == "output":
pass
else:
print(colors.BOLD + colors.YELLOW + "Program Header Information: " + colors.RESET)
for line in prog_header:
line = line.decode('utf-8', 'ignore').replace("\n", "")
print(line)
print()
print("================================================================================")
sect_header = elf.section_header()
if sect_header:
if args.report == "output":
pass
else:
print(colors.BOLD + colors.YELLOW + "Section Header Information: " + colors.RESET)
for line in sect_header:
line = line.decode('utf-8', 'ignore').replace("\n", "")
print(line)
print()
print("================================================================================")
syms = elf.symbols()
if syms:
if args.report == "output":
pass
else:
print(colors.BOLD + colors.YELLOW + "Symbol Information: " + colors.RESET)
for line in syms:
line = line.decode('utf-8', 'ignore').replace("\n", "")
print(line)
print()
print("================================================================================")
checksec = elf.checksec()
if checksec:
if args.report == "output":
pass
else:
print(colors.BOLD + colors.YELLOW + "CheckSec Information: " + colors.RESET)
for key, value in checksec.items():
print(key + ": " + str(value))
print()
print("================================================================================")
if args.report:
if not os.path.exists("analysis_report"):
os.mkdir("analysis_report")
            file_report = elf_report(elf, args.report, args.strings)
else:
print(colors.BOLD + colors.YELLOW + "File Details: " + colors.RESET)
for n in file_info(args.filename):
print('\t', n)
print()
print("================================================================================")
if args.report:
if not os.path.exists("analysis_report"):
os.mkdir("analysis_report")
file_report = others_report(file_info(args.filename), args.strings)
if args.api_key and internet_connection:
virus_check = virustotal(args.filename, args.api_key)
if virus_check[0] == "scan_result":
print(colors.BOLD + colors.YELLOW + "Virustotal:" + colors.RESET)
for n in virus_check[1]:
n = n.split("^")
print('\t' + colors.CYAN + n[0] + colors.RESET + "-" + colors.LIGHT_RED + n[1] + colors.RESET)
print()
print("================================================================================")
elif virus_check[0] == "permalink":
if virus_check[1]:
print(colors.LIGHT_BLUE + "Your file is being analysed." + colors.RESET)
print(colors.BOLD + "VirusTotal link: " + colors.RESET, virus_check[1][0])
print()
print("================================================================================")
                if input("Continue? [Y/n] ") == 'n':
exit()
print()
elif args.api_key and not internet_connection:
print(colors.RED + "No internet connection" + colors.RESET)
print("================================================================================")
strings = get_strings(filename=args.filename).get_result()
if strings[0]:
if internet_connection:
mal_domains = ransomware_and_malware_domain_check(list(strings[0]))
if args.report == "output":
pass
else:
print(colors.BOLD + colors.YELLOW + "Possible domains in strings of the file: " + colors.RESET)
mal_domains = ransomware_and_malware_domain_check(list(strings[0]))
for n in mal_domains[0]:
print('\t', n)
print()
if mal_domains[1]:
print("\t" + colors.RED + "Abuse.ch's Ransomware Domain Blocklist: " + colors.RESET)
for n in mal_domains[1]:
print('\t', n)
print()
if mal_domains[2]:
print(
"\t" + colors.RED + "A list of domains that are known to be used to propagate malware by http://www.malwaredomains.com/" + colors.RESET)
for n in mal_domains[2]:
print('\t', n)
print()
print()
print("================================================================================")
if strings[1]:
if args.report == "output":
pass
else:
print(colors.BOLD + colors.YELLOW + "Possible IP addresses in strings of the file: " + colors.RESET)
for n in strings[1]:
print('\t', n)
print()
print("================================================================================")
if strings[2]:
if args.report == "output":
pass
else:
print(colors.BOLD + colors.YELLOW + "Possible E-Mail addresses in strings of the file:" + colors.RESET)
for n in strings[2]:
print('\t', n)
print()
print("================================================================================")
if args.report:
if internet_connection:
mal_domains = ransomware_and_malware_domain_check(list(strings[0]))
domains = {
"normal_domains": list(mal_domains[0]),
"malware_domains": list(mal_domains[1]) + list(mal_domains[2])
}
else:
domains = list(strings[0])
strings_result = {
"Domains": domains,
"IP-addresses": strings[1],
"Email": strings[2]
}
file_report.domains(strings_result)
if filetype == 'application/x-dosexec' or filetype == 'application/x-executable' or args.document:
if args.report == "output":
pass
else:
print(
colors.BOLD + colors.YELLOW + "Scan file using Yara-rules.\nWith Yara rules you can create a \"description\" of malware families to detect new samples.\n" + colors.BOLD + colors.CYAN + "\tFor more information: https://virustotal.github.io/yara/\n" + colors.RESET)
if not os.path.exists("rules"):
os.mkdir("rules")
if not os.path.exists("rules_compiled"):
os.mkdir("rules_compiled")
if not os.listdir("rules"):
if args.report == "output":
pass
else:
print(colors.BOLD + colors.CYAN + "Downloading Yara-rules... \n" + colors.RESET)
print()
download_yara_rules_git()
if filetype == 'application/x-dosexec':
malicious_software = is_malware(filename=args.filename)
if malicious_software:
if args.report == "output":
pass
else:
print(
                    colors.BOLD + colors.YELLOW + "These Yara rules specialise in identifying well-known malware.\nResult: " + colors.RESET)
for n in malicious_software:
try:
print("\t {} - {}".format(n, n.meta['description']))
except:
print('\t', n)
print()
print("================================================================================")
packed = is_file_packed(filename=args.filename)
if packed:
if args.report == "output":
pass
else:
print(
                    colors.BOLD + colors.YELLOW + "These Yara Rules aim to detect well-known software packers that can be used by malware to hide itself.\nResult: " + colors.RESET)
for n in packed:
try:
print("\t {} - {}".format(n, n.meta['description']))
except:
print('\t', n)
print()
print("================================================================================")
crypto = check_crypto(filename=args.filename)
if crypto:
if args.report == "output":
pass
else:
print(
                    colors.BOLD + colors.YELLOW + "These Yara rules aim to detect the existence of cryptographic algorithms." + colors.RESET)
print(colors.YELLOW + "Detected cryptographic algorithms: " + colors.RESET)
for n in crypto:
try:
print("\t {} - {}".format(n, n.meta['description']))
except:
print('\t', n)
print()
print("================================================================================")
anti_vm = is_antidb_antivm(filename=args.filename)
if anti_vm:
if args.report == "output":
pass
else:
print(
                    colors.BOLD + colors.YELLOW + "These Yara Rules aim to detect anti-debug and anti-virtualization techniques used by malware to evade automated analysis.\nResult: " + colors.RESET)
for n in anti_vm:
try:
print("\t {} - {}".format(n, n.meta['description']))
except:
print('\t', n)
print()
print("================================================================================")
your_target = {}
if args.yara:
yara = str(os.path.realpath(args.yara))
your_target = is_your_target(args.filename, yara)
if your_target:
if args.report == "output":
pass
else:
print(
                        colors.BOLD + colors.YELLOW + "These Yara Rules are created by yourself and aim to detect something you need.\nResult: " + colors.RESET)
for n in your_target:
try:
print("\t {} - {}".format(n, n.meta['description']))
except:
print('\t', n)
print()
print("================================================================================")
if args.report:
malicious_software_result = {}
packed_result = {}
crypto_result = {}
anti_vm_result = {}
your_target_result = {}
if malicious_software:
for n in malicious_software:
try:
malicious_software_result[str(n)] = n.meta['description']
except:
malicious_software_result[str(n)] = None
if packed:
for n in packed:
try:
packed_result[str(n)] = n.meta['description']
except:
packed_result[str(n)] = None
if crypto:
for n in crypto:
try:
crypto_result[str(n)] = n.meta['description']
except:
crypto_result[str(n)] = None
if anti_vm:
for n in anti_vm:
try:
anti_vm_result[str(n)] = n.meta['description']
except:
anti_vm_result[str(n)] = None
if your_target:
for n in your_target:
try:
your_target_result[str(n)] = n.meta['description']
except:
your_target_result[str(n)] = None
yara_result = {
"malicious_software": malicious_software_result,
"packed": packed_result,
"crypto": crypto_result,
"anti_vm": anti_vm_result,
"your_target": your_target_result
}
file_report.yara(yara_result)
file_report.write()
if filetype == 'application/x-executable':
your_target = {}
if args.yara:
yara = str(os.path.realpath(args.yara))
your_target = is_your_target(args.filename, yara)
if your_target:
if args.report == "output":
pass
else:
print(
                        colors.BOLD + colors.YELLOW + "These Yara Rules are created by yourself and aim to detect something you need.\nResult: " + colors.RESET)
for n in your_target:
try:
print("\t {} - {}".format(n, n.meta['description']))
except:
print('\t', n)
print()
print("================================================================================")
if args.report:
your_target_result = {}
if your_target:
for n in your_target:
try:
your_target_result[str(n)] = n.meta['description']
except:
your_target_result[str(n)] = None
yara_result = {
"your_target": your_target_result
}
file_report.yara(yara_result)
file_report.write()
if args.document:
malicious_document = is_malicious_document(filename=args.filename)
if args.report == "output":
pass
else:
print(
                colors.BOLD + colors.YELLOW + "These Yara Rules are used with documents to find whether they have been crafted to leverage malicious code.\nResult: " + colors.RESET)
if malicious_document:
for n in malicious_document:
try:
print("\t {} - {}".format(n, n.meta['description']))
except:
print('\t', n)
print("================================================================================")
your_target = {}
if args.yara:
yara = str(os.path.realpath(args.yara))
your_target = is_your_target(args.filename, yara)
if your_target:
if args.report == "output":
pass
else:
print(
                        colors.BOLD + colors.YELLOW + "These Yara Rules are created by yourself and aim to detect something you need.\nResult: " + colors.RESET)
for n in your_target:
try:
print("\t {} - {}".format(n, n.meta['description']))
except:
print('\t', n)
print()
print("================================================================================")
if args.report:
your_target_result = {}
if your_target:
for n in your_target:
try:
your_target_result[str(n)] = n.meta['description']
except:
your_target_result[str(n)] = None
malicious_document_result = {}
if malicious_document:
for n in malicious_document:
try:
                        malicious_document_result[str(n)] = n.meta['description']
except:
malicious_document_result[str(n)] = None
yara_result = {
"malicious_document": malicious_document_result,
"your_target": your_target_result
}
file_report.yara(yara_result)
file_report.write()
else:
print(colors.BOLD + "\tAnalysis Complete" + colors.RESET)
print("================================================================================")
exit()
if args.report == "output":
rDump = file_report.dump()
with open(args.filename, "rb") as ff:
data = ff.read()
hashFile = hashlib.sha256(data).hexdigest()
jd = json.loads(rDump)
Sections = jd.get("sections").get("sections")
Functions = jd.get("imports")
Flags = jd.get("file_header").get("flags")
Doms = jd.get("malware_domains").get("Domains")
IPs = jd.get("malware_domains").get("IP-addresses")
Emails = jd.get("malware_domains").get("Email")
md = markdown.MarkDown([Sections, Functions, Flags, Doms, IPs, Emails])
mdOut = md.write()
body = file_report.malice_dump(mdOut)
if args.table:
print(mdOut)
try:
with nostderr():
es = Elasticsearch(["elasticsearch", "127.0.0.1", os.environ.get("MALICE_ELASTICSEARCH")])
res = es.update(index="malice", doc_type='sample', id=os.environ.get('MALICE_SCANID',hashFile), body=body)
except:
pass
else:
print(rDump)
try:
with nostderr():
es = Elasticsearch(["elasticsearch", "127.0.0.1", os.environ.get("MALICE_ELASTICSEARCH")])
res = es.update(index="malice", doc_type='sample', id=os.environ.get('MALICE_SCANID',hashFile), body=body)
except:
pass
else:
print(colors.YELLOW + "Ups... " + colors.CYAN + "That's all :)" + colors.RESET + "\n")
| gpl-3.0 | -1,196,994,951,961,589,500 | 41.4003 | 279 | 0.431526 | false |
davidsandberg/facenet | src/generative/modify_attribute.py | 1 | 6129 | # MIT License
#
# Copyright (c) 2017 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Modify attributes of images using attribute vectors calculated using
'calculate_attribute_vectors.py'. Images are generated from latent variables of
the CelebA dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import sys
import argparse
import importlib
import facenet
import os
import numpy as np
import h5py
import math
from scipy import misc
def main(args):
img_mean = np.array([134.10714722, 102.52040863, 87.15436554])
img_stddev = np.sqrt(np.array([3941.30175781, 2856.94287109, 2519.35791016]))
vae_def = importlib.import_module(args.vae_def)
vae = vae_def.Vae(args.latent_var_size)
gen_image_size = vae.get_image_size()
with tf.Graph().as_default():
tf.set_random_seed(args.seed)
images = tf.placeholder(tf.float32, shape=(None,gen_image_size,gen_image_size,3), name='input')
# Normalize
images_norm = (images-img_mean) / img_stddev
# Resize to appropriate size for the encoder
images_norm_resize = tf.image.resize_images(images_norm, (gen_image_size,gen_image_size))
# Create encoder network
mean, log_variance = vae.encoder(images_norm_resize, True)
epsilon = tf.random_normal((tf.shape(mean)[0], args.latent_var_size))
std = tf.exp(log_variance/2)
latent_var = mean + epsilon * std
# Create decoder
reconstructed_norm = vae.decoder(latent_var, False)
# Un-normalize
reconstructed = (reconstructed_norm*img_stddev) + img_mean
# Create a saver
saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)
# Start running operations on the Graph
gpu_memory_fraction = 1.0
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
coord = tf.train.Coordinator()
tf.train.start_queue_runners(coord=coord, sess=sess)
with sess.as_default():
vae_checkpoint = os.path.expanduser(args.vae_checkpoint)
print('Restoring VAE checkpoint: %s' % vae_checkpoint)
saver.restore(sess, vae_checkpoint)
filename = os.path.expanduser(args.attributes_filename)
with h5py.File(filename,'r') as f:
latent_vars = np.array(f.get('latent_vars'))
attributes = np.array(f.get('attributes'))
#fields = np.array(f.get('fields'))
attribute_vectors = np.array(f.get('attribute_vectors'))
# Reconstruct faces while adding varying amount of the selected attribute vector
attribute_index = 31 # 31: 'Smiling'
image_indices = [8,11,13,18,19,26,31,39,47,54,56,57,58,59,60,73]
nrof_images = len(image_indices)
nrof_interp_steps = 10
sweep_latent_var = np.zeros((nrof_interp_steps*nrof_images, args.latent_var_size), np.float32)
for j in range(nrof_images):
image_index = image_indices[j]
idx = np.argwhere(attributes[:,attribute_index]==-1)[image_index,0]
for i in range(nrof_interp_steps):
sweep_latent_var[i+nrof_interp_steps*j,:] = latent_vars[idx,:] + 5.0*i/nrof_interp_steps*attribute_vectors[attribute_index,:]
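                # For each of the 16 selected faces this inner loop fills 10 consecutive
                # rows: row j*10+i = z(face) + (5.0*i/10) * v('Smiling'), i.e. the attribute
                # vector is added with coefficients 0.0, 0.5, ..., 4.5 before decoding.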
recon = sess.run(reconstructed, feed_dict={latent_var:sweep_latent_var})
img = facenet.put_images_on_grid(recon, shape=(nrof_interp_steps*2,int(math.ceil(nrof_images/2))))
image_filename = os.path.expanduser(args.output_image_filename)
print('Writing generated image to %s' % image_filename)
misc.imsave(image_filename, img)
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('vae_def', type=str,
help='Model definition for the variational autoencoder. Points to a module containing the definition.')
parser.add_argument('vae_checkpoint', type=str,
help='Checkpoint file of a pre-trained variational autoencoder.')
parser.add_argument('attributes_filename', type=str,
help='The file containing the attribute vectors, as generated by calculate_attribute_vectors.py.')
parser.add_argument('output_image_filename', type=str,
help='File to write the generated image to.')
parser.add_argument('--latent_var_size', type=int,
help='Dimensionality of the latent variable.', default=100)
parser.add_argument('--seed', type=int,
help='Random seed.', default=666)
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
| mit | -569,988,245,719,070,850 | 42.161972 | 145 | 0.658182 | false |
pgagne/robottelo | robottelo/ssh.py | 1 | 18958 | """Utility module to handle the shared ssh connection."""
import base64
import logging
import os
import re
import time
import paramiko
import six
from fnmatch import fnmatch
from contextlib import contextmanager
from robottelo.cli import hammer
from robottelo.config import settings
logger = logging.getLogger(__name__)
class SSHCommandTimeoutError(Exception):
"""Raised when the SSH command has not finished executing after a
predefined period of time.
"""
def decode_to_utf8(text): # pragma: no cover
"""Paramiko returns bytes object and we need to ensure it is utf-8 before
parsing
"""
try:
return text.decode('utf-8')
except (AttributeError, UnicodeEncodeError):
return text
class SSHCommandResult(object):
"""Structure that returns in all ssh commands results."""
def __init__(
self, stdout=None, stderr=None, return_code=0, output_format=None):
self.stdout = stdout
self.stderr = stderr
self.return_code = return_code
self.output_format = output_format
# Does not make sense to return suspicious output if ($? <> 0)
if output_format and self.return_code == 0:
if output_format == 'csv':
self.stdout = hammer.parse_csv(stdout) if stdout else {}
if output_format == 'json':
self.stdout = hammer.parse_json(stdout) if stdout else None
def __repr__(self):
tmpl = u'SSHCommandResult(stdout={stdout!r}, stderr={stderr!r}, ' + \
u'return_code={return_code!r}, output_format={output_format!r})'
return tmpl.format(**self.__dict__)
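# Illustrative sketch (not part of the original API surface): when a command is
# run with output_format='csv' and exits with return code 0, the raw stdout
# string is replaced by the rows parsed by hammer.parse_csv, e.g.
#
#   result = SSHCommandResult(stdout='Id,Name\n1,foo\n', return_code=0,
#                             output_format='csv')
#   result.stdout  # -> parsed rows from hammer.parse_csv, not the raw string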
class SSHClient(paramiko.SSHClient):
"""Extended SSHClient allowing custom methods"""
def run(self, cmd, *args, **kwargs):
"""This method exists to allow the reuse of existing connections when
running multiple ssh commands as in the following example of use:
        with robottelo.ssh.get_connection() as connection:
connection.run('ls /tmp')
connection.run('another command')
`self` is always passed as the connection when used in context manager
only when using `ssh.get_connection` function.
Note: This method is named `run` to avoid conflicts with existing
`exec_command` and local function `execute_command`.
"""
return execute_command(cmd, self, *args, **kwargs)
def _call_paramiko_sshclient(): # pragma: no cover
"""Call ``paramiko.SSHClient``.
This function does not alter the behaviour of ``paramiko.SSHClient``. It
exists solely for the sake of easing unit testing: it can be overridden for
mocking purposes.
"""
return SSHClient()
def get_client(hostname=None, username=None, password=None,
key_filename=None, timeout=None, port=22):
"""Returns a SSH client connected to given hostname"""
if hostname is None:
hostname = settings.server.hostname
if username is None:
username = settings.server.ssh_username
if key_filename is None and password is None:
key_filename = settings.server.ssh_key
if password is None:
password = settings.server.ssh_password
if timeout is None:
timeout = settings.ssh_client.connection_timeout
client = _call_paramiko_sshclient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect(
hostname=hostname,
username=username,
key_filename=key_filename,
password=password,
timeout=timeout,
port=port,
)
client._id = hex(id(client))
return client
@contextmanager
def get_connection(hostname=None, username=None, password=None,
key_filename=None, timeout=None, port=22):
"""Yield an ssh connection object.
The connection will be configured with the specified arguments or will
fall-back to server configuration in the configuration file.
Yield this SSH connection. The connection is automatically closed when the
caller is done using it using ``contextlib``, so clients should use the
``with`` statement to handle the object::
with get_connection() as connection:
...
:param str hostname: The hostname of the server to establish connection.If
it is ``None`` ``hostname`` from configuration's ``server`` section
will be used.
:param str username: The username to use when connecting. If it is ``None``
``ssh_username`` from configuration's ``server`` section will be used.
:param str password: The password to use when connecting. If it is ``None``
``ssh_password`` from configuration's ``server`` section will be used.
Should be applied only in case ``key_filename`` is not set
:param str key_filename: The path of the ssh private key to use when
connecting to the server. If it is ``None`` ``key_filename`` from
configuration's ``server`` section will be used.
:param int timeout: Time to wait for establish the connection.
:param int port: The server port to connect to, the default port is 22.
:return: An SSH connection.
:rtype: ``paramiko.SSHClient``
"""
if timeout is None:
timeout = settings.ssh_client.connection_timeout
client = get_client(
hostname, username, password, key_filename, timeout, port
)
try:
logger.debug('Instantiated Paramiko client {0}'.format(client._id))
logger.info('Connected to [%s]', hostname)
yield client
finally:
client.close()
logger.debug('Destroyed Paramiko client {0}'.format(client._id))
@contextmanager
def get_sftp_session(hostname=None, username=None,
password=None, key_filename=None, timeout=None):
"""Yield a SFTP session object.
    The session will be configured with the host whose hostname is passed as
    argument.
    Yield this SFTP Session. The session is automatically closed when
    the caller is done using it using ``contextlib``, so clients should use
    the ``with`` statement to handle the object::
with get_sftp_session() as session:
...
:param str hostname: The hostname of the server to establish connection.If
it is ``None`` ``hostname`` from configuration's ``server`` section
will be used.
    :param str username: The username to use when connecting. If it is ``None``
        ``ssh_username`` from configuration's ``server`` section will be used.
:param str password: The password to use when connecting. If it is
``None`` ``ssh_password`` from configuration's ``server`` section
will be used. Should be applied only in case ``key_filename`` is not
set
:param str key_filename: The path of the ssh private key to use when
connecting to the server. If it is ``None`` ``key_filename`` from
configuration's ``server`` section will be used.
:param int timeout: Time to wait for establish the connection.
"""
with get_connection(hostname=hostname, username=username,
password=password, key_filename=key_filename,
timeout=timeout) as connection:
try:
sftp = connection.open_sftp()
yield sftp
finally:
sftp.close()
def add_authorized_key(key, hostname=None, username=None, password=None,
key_filename=None, timeout=None):
"""Appends a local public ssh key to remote authorized keys
refer to: remote_execution_ssh_keys provisioning template
:param key: either a file path, key string or a file-like obj to append.
:param str hostname: The hostname of the server to establish connection. If
it is ``None`` ``hostname`` from configuration's ``server`` section
will be used.
:param str username: The username to use when connecting. If it is ``None``
``ssh_username`` from configuration's ``server`` section will be used.
:param str password: The password to use when connecting. If it is ``None``
``ssh_password`` from configuration's ``server`` section will be used.
Should be applied only in case ``key_filename`` is not set
:param str key_filename: The path of the ssh private key to use when
connecting to the server. If it is ``None`` ``key_filename`` from
configuration's ``server`` section will be used.
:param int timeout: Time to wait for establish the connection.
"""
if getattr(key, 'read', None): # key is a file-like object
key_content = key.read() # pragma: no cover
elif is_ssh_pub_key(key): # key is a valid key-string
key_content = key
elif os.path.exists(key): # key is a path to a pub key-file
with open(key, 'r') as key_file: # pragma: no cover
key_content = key_file.read()
else:
raise AttributeError('Invalid key')
if timeout is None:
timeout = settings.ssh_client.connection_timeout
key_content = key_content.strip()
ssh_path = '~/.ssh'
auth_file = os.path.join(ssh_path, 'authorized_keys')
with get_connection(hostname=hostname, username=username,
password=password, key_filename=key_filename,
timeout=timeout) as con:
# ensure ssh directory exists
execute_command('mkdir -p %s' % ssh_path, con)
# append the key if doesn't exists
add_key = "grep -q '{key}' {dest} || echo '{key}' >> {dest}".format(
key=key_content, dest=auth_file)
execute_command(add_key, con)
# set proper permissions
execute_command('chmod 700 %s' % ssh_path, con)
execute_command('chmod 600 %s' % auth_file, con)
ssh_user = username or settings.server.ssh_username
execute_command('chown -R %s %s' % (ssh_user, ssh_path), con)
# Restore SELinux context with restorecon, if it's available:
cmd = 'command -v restorecon && restorecon -RvF %s || true' % ssh_path
execute_command(cmd, con)
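# Hypothetical usage sketch for the helper above (path and key string are
# placeholders, not real credentials):
#
#   add_authorized_key('/tmp/id_rsa.pub')                    # path to a pub key file
#   add_authorized_key('ssh-rsa AAAA...B3Nz user@example')   # or the key string itself
#
# Either form is appended (once) to ~/.ssh/authorized_keys on the target host.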
def upload_file(local_file, remote_file, key_filename=None, hostname=None):
"""Upload a local file to a remote machine
:param local_file: either a file path or a file-like object to be uploaded.
:param remote_file: a remote file path where the uploaded file will be
placed.
:param hostname: target machine hostname. If not provided will be used the
``server.hostname`` from the configuration.
:param str key_filename: The path of the ssh private key to use when
connecting to the server. If it is ``None`` ``key_filename`` from
configuration's ``server`` section will be used.
"""
with get_sftp_session(hostname=hostname,
key_filename=key_filename) as sftp:
_upload_file(sftp, local_file, remote_file)
def upload_files(local_dir, remote_dir, file_search="*.txt",
hostname=None, key_filename=None):
""" Upload all files from directory to a remote directory
:param local_dir: all files from local path to be uploaded.
:param remote_dir: a remote path where the uploaded files will be
placed.
    :param file_search: glob pattern; only files matching it are uploaded
:param hostname: target machine hostname. If not provided will be used the
``server.hostname`` from the configuration.
:param str key_filename: The path of the ssh private key to use when
connecting to the server. If it is ``None`` ``key_filename`` from
configuration's ``server`` section will be used.
"""
command("mkdir -p {}".format(remote_dir))
# making only one SFTP Session to transfer all files
with get_sftp_session(hostname=hostname,
key_filename=key_filename) as sftp:
for root, dirs, files in os.walk(local_dir):
for local_filename in files:
if fnmatch(local_filename, file_search):
remote_file = "{0}/{1}".format(remote_dir, local_filename)
local_file = os.path.join(local_dir, local_filename)
_upload_file(sftp, local_file, remote_file)
def _upload_file(sftp, local_file, remote_file):
""" Upload a file using existent sftp session
:param sftp: sftp session object
:param local_file: either a file path or a file-like object to be uploaded.
:param remote_file: a remote file path where the uploaded file will be
placed.
"""
# Check if local_file is a file-like object and use the proper
# paramiko function to upload it to the remote machine.
if hasattr(local_file, 'read'):
sftp.putfo(local_file, remote_file)
else:
sftp.put(local_file, remote_file)
def download_file(remote_file, local_file=None, hostname=None):
"""Download a remote file to the local machine. If ``hostname`` is not
provided will be used the server.
"""
if local_file is None: # pragma: no cover
local_file = remote_file
with get_connection(
hostname=hostname,
) as connection: # pragma: no cover
try:
sftp = connection.open_sftp()
sftp.get(remote_file, local_file)
finally:
sftp.close()
def command(cmd, hostname=None, output_format=None, username=None,
password=None, key_filename=None, timeout=None,
connection_timeout=None, port=22):
"""Executes SSH command(s) on remote hostname.
:param str cmd: The command to run
:param str output_format: json, csv or None
:param str hostname: The hostname of the server to establish connection. If
it is ``None`` ``hostname`` from configuration's ``server`` section
will be used.
:param str username: The username to use when connecting. If it is ``None``
``ssh_username`` from configuration's ``server`` section will be used.
:param str password: The password to use when connecting. If it is ``None``
``ssh_password`` from configuration's ``server`` section will be used.
Should be applied only in case ``key_filename`` is not set
:param str key_filename: The path of the ssh private key to use when
connecting to the server. If it is ``None`` ``key_filename`` from
configuration's ``server`` section will be used.
:param int timeout: Time to wait for the ssh command to finish.
:param connection_timeout: Time to wait for establishing the connection.
:param int port: The server port to connect to, the default port is 22.
"""
hostname = hostname or settings.server.hostname
if timeout is None:
timeout = settings.ssh_client.command_timeout
if connection_timeout is None:
connection_timeout = settings.ssh_client.connection_timeout
with get_connection(hostname=hostname, username=username,
password=password, key_filename=key_filename,
timeout=connection_timeout, port=port) as connection:
return execute_command(
cmd, connection, output_format, timeout, connection_timeout)
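# Hypothetical usage sketch (the command string is a placeholder; with no
# hostname the target defaults to settings.server.hostname):
#
#   result = command('rpm -q satellite', output_format='plain')
#   if result.return_code == 0:
#       print(result.stdout)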
def execute_command(cmd, connection, output_format=None, timeout=None,
connection_timeout=None):
"""Execute a command via ssh in the given connection
:param cmd: a command to be executed via ssh
:param connection: SSH Paramiko client connection
:param output_format: base|json|csv|list valid only for hammer commands
:param timeout: Time to wait for the ssh command to finish.
:param connection_timeout: Time to wait for establishing the connection.
:return: SSHCommandResult
"""
if timeout is None:
timeout = settings.ssh_client.command_timeout
if connection_timeout is None:
connection_timeout = settings.ssh_client.connection_timeout
logger.info('>>> %s', cmd)
_, stdout, stderr = connection.exec_command(
cmd, timeout=connection_timeout)
if timeout:
# wait for the exit status ready
end_time = time.time() + timeout
while time.time() < end_time:
if stdout.channel.exit_status_ready():
break
time.sleep(1)
else:
logger.error('ssh command did not respond in the predefined time'
' (timeout=%s) and will be interrupted', timeout)
stdout.channel.close()
stderr.channel.close()
logger.error(
'[Captured stdout]\n{0}\n-----\n'.format(stdout.read())
)
logger.error(
'[Captured stderr]\n{0}\n-----\n'.format(stderr.read())
)
raise SSHCommandTimeoutError(
'ssh command: {0} \n did not respond in the predefined time '
'(timeout={1})'.format(cmd, timeout)
)
errorcode = stdout.channel.recv_exit_status()
stdout = stdout.read()
stderr = stderr.read()
# Remove escape code for colors displayed in the output
regex = re.compile(r'\x1b\[\d\d?m')
if stdout:
# Convert to unicode string
stdout = decode_to_utf8(stdout)
logger.info('<<< stdout\n%s', stdout)
if stderr:
# Convert to unicode string and remove all color codes characters
stderr = regex.sub('', decode_to_utf8(stderr))
logger.info('<<< stderr\n%s', stderr)
# Skip converting to list if 'plain', or the hammer options 'json' or 'base' are passed
if stdout and output_format not in ('json', 'base', 'plain'):
# Mostly only for hammer commands
# for output we don't really want to see all of Rails traffic
# information, so strip it out.
# Empty fields are returned as "" which gives us u'""'
stdout = stdout.replace('""', '')
stdout = u''.join(stdout).split('\n')
stdout = [
regex.sub('', line)
for line in stdout
if not line.startswith('[')
]
return SSHCommandResult(
stdout, stderr, errorcode, output_format)
def is_ssh_pub_key(key):
"""Validates if a string is in valid ssh pub key format
:param key: A string containing a ssh public key encoded in base64
:return: Boolean
"""
if not isinstance(key, six.string_types):
raise ValueError(
"Key should be a string type, received: %s" % type(key))
# 1) a valid pub key has 3 parts separated by space
try:
key_type, key_string, comment = key.split()
except ValueError: # need more than one value to unpack
return False
# 2) The second part (key string) should be a valid base64
try:
base64.decodebytes(key_string.encode('ascii'))
except base64.binascii.Error:
return False
# 3) The first part, the type, should be one of below
return key_type in (
'ecdsa-sha2-nistp256', 'ssh-dss', 'ssh-rsa', 'ssh-ed25519'
)
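# Illustrative checks (the key body below is shortened, so this is a sketch only):
#
#   is_ssh_pub_key('ssh-rsa AAAAB3NzaC1yc2E= user@example.com')  # -> True
#   is_ssh_pub_key('not an ssh key')                             # -> False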
| gpl-3.0 | -8,305,814,937,608,385,000 | 39.33617 | 91 | 0.641893 | false |