repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
14thibea/megamix | doc/example.py | 1 | 2598 |
########################
# Prelude to the example
########################
"""
This example uses a DP-VBGMM model.
The other mixture models and K-means work in the same way.
The available classes are:
- Kmeans (kmeans)
- GaussianMixture (GMM)
- VariationalGaussianMixture (VBGMM)
- DPVariationalGaussianMixture (DP-VBGMM)
"""
from megamix.batch import DPVariationalGaussianMixture
import numpy as np
########################
# Features used
########################
"""
Features must be numpy arrays of two dimensions:
the first dimension is the number of points
the second dimension is the dimension of the space
"""
# Here we use a random set of points for the example
n_points = 10000
dim = 39
points = np.random.randn(n_points,dim)
########################
# Fitting the model
########################
# We choose the number of clusters that we want
n_components = 100
# The model is instantiated
GM = DPVariationalGaussianMixture(n_components)
# The model is fitted
GM.fit(points)
# It is also possible to do early stopping in order to avoid overfitting
points_data = points[:n_points//2]
points_test = points[n_points//2:]
# In this case the model will fit only on points_data but will use points_test
# to evaluate the convergence criterion.
GM.fit(points_data,points_test)
# Some clusters may disappear with the DP-VBGMM model. You may want to
# simplify the model by removing the useless information
GM_simple = GM.simplified_model(points)
##########################
# Analysis of the model
##########################
other_points = np.random.randn(n_points,dim)
# We can obtain the log of the responsibilities of any set of points when the
# model is fitted (or at least initialized)
log_resp = GM.predict_log_resp(other_points)
# log_resp.shape = (n_points,n_components)
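# As an illustrative sketch (plain numpy, not part of the megamix API), hard
# cluster assignments can be recovered from the log responsibilities:
labels = np.argmax(log_resp, axis=1)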
# We can obtain the value of the convergence criterion for any set of points
score = GM.score(other_points)
#############################
# Writing or reading a model
#############################
# It is possible to write your model in a group of a h5py file
import h5py
file = h5py.File('DP_VBGMM.h5','w')
grp = file.create_group('model_fitted')
GM.write(grp)
file.close()
# You also can read data from such h5py file to initialize new models
GM_new = DPVariationalGaussianMixture()
file = h5py.File('DP_VBGMM.h5','r')
grp = file['model_fitted']
GM_new.read_and_init(grp,points)
file.close()
# You can also save your model regularly while fitting it by using
# the saving parameter
GM.fit(points,saving='log',directory='mypath',legend='wonderful_model') | apache-2.0 | 5,257,591,040,589,627,000 | 25.793814 | 78 | 0.675905 | false |
pravsripad/mne-python | tutorials/source-modeling/plot_compute_covariance.py | 4 | 8732 |
"""
.. _tut_compute_covariance:
Computing a covariance matrix
=============================
Many methods in MNE, including source estimation and some classification
algorithms, require covariance estimations from the recordings.
In this tutorial we cover the basics of sensor covariance computations and
construct a noise covariance matrix that can be used when computing the
minimum-norm inverse solution. For more information, see
:ref:`minimum_norm_estimates`.
"""
import os.path as op
import mne
from mne.datasets import sample
###############################################################################
# Source estimation methods such as MNE require a noise estimation from the
# recordings. In this tutorial we cover the basics of noise covariance and
# construct a noise covariance matrix that can be used when computing the
# inverse solution. For more information, see :ref:`minimum_norm_estimates`.
data_path = sample.data_path()
raw_empty_room_fname = op.join(
data_path, 'MEG', 'sample', 'ernoise_raw.fif')
raw_empty_room = mne.io.read_raw_fif(raw_empty_room_fname)
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(raw_fname)
raw.set_eeg_reference('average', projection=True)
raw.info['bads'] += ['EEG 053'] # bads + 1 more
###############################################################################
# The definition of noise depends on the paradigm. In MEG it is quite common
# to use empty room measurements for the estimation of sensor noise. However,
# if you are dealing with evoked responses, you might also want to consider
# resting state brain activity as noise.
# First we compute the noise covariance using the empty room recording. Note that you can also
# use only a part of the recording with tmin and tmax arguments. That can be
# useful if you use resting state as a noise baseline. Here we use the whole
# empty room recording to compute the noise covariance (``tmax=None`` is the
# same as the end of the recording, see :func:`mne.compute_raw_covariance`).
#
# Keep in mind that you want to match your empty room dataset to your
# actual MEG data, processing-wise. Ensure that filters
# are all the same and if you use ICA, apply it to your empty-room and subject
# data equivalently. In this case we did not filter the data and
# we don't use ICA. However, we do have bad channels and projections in
# the MEG data, and, hence, we want to make sure they get stored in the
# covariance object.
raw_empty_room.info['bads'] = [
bb for bb in raw.info['bads'] if 'EEG' not in bb]
raw_empty_room.add_proj(
[pp.copy() for pp in raw.info['projs'] if 'EEG' not in pp['desc']])
noise_cov = mne.compute_raw_covariance(
raw_empty_room, tmin=0, tmax=None)
###############################################################################
# Now that you have the covariance matrix in an MNE-Python object you can
# save it to a file with :func:`mne.write_cov`. Later you can read it back
# using :func:`mne.read_cov`.
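#
# As a minimal sketch (the file name below is just an example)::
#
#     mne.write_cov('ernoise-cov.fif', noise_cov)
#     noise_cov = mne.read_cov('ernoise-cov.fif')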
#
# You can also use the pre-stimulus baseline to estimate the noise covariance.
# First we have to construct the epochs. When computing the covariance, you
# should use baseline correction when constructing the epochs. Otherwise the
# covariance matrix will be inaccurate. In MNE this is done by default, but
# just to be sure, we define it here manually.
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=0.5,
baseline=(-0.2, 0.0), decim=3, # we'll decimate for speed
verbose='error') # and ignore the warning about aliasing
###############################################################################
# Note that this method also attenuates any activity in your
# source estimates that resembles the baseline, whether you like it or not.
noise_cov_baseline = mne.compute_covariance(epochs, tmax=0)
###############################################################################
# Plot the covariance matrices
# ----------------------------
#
# Try setting proj to False to see the effect. Notice that the projectors in
# epochs are already applied, so ``proj`` parameter has no effect.
noise_cov.plot(raw_empty_room.info, proj=True)
noise_cov_baseline.plot(epochs.info, proj=True)
###############################################################################
# .. _plot_compute_covariance_howto:
#
# How should I regularize the covariance matrix?
# ----------------------------------------------
#
# The estimated covariance can be numerically
# unstable and tends to induce correlations between estimated source amplitudes
# and the number of samples available. The MNE manual therefore suggests to
# regularize the noise covariance matrix (see
# :ref:`cov_regularization_math`), especially if only few samples are
# available. Unfortunately it is not easy to tell the effective number of
# samples, hence, to choose the appropriate regularization.
# In MNE-Python, regularization is done using advanced regularization methods
# described in :footcite:`EngemannGramfort2015`. For this the 'auto' option
# can be used. With this option cross-validation will be used to learn the
# optimal regularization:
noise_cov_reg = mne.compute_covariance(epochs, tmax=0., method='auto',
rank=None)
###############################################################################
# This procedure evaluates the noise covariance quantitatively by how well it
# whitens the data using the
# negative log-likelihood of unseen data. The final result can also be visually
# inspected.
# Under the assumption that the baseline does not contain a systematic signal
# (time-locked to the event of interest), the whitened baseline signal should
# follow a multivariate Gaussian distribution, i.e.,
# whitened baseline signals should be between -1.96 and 1.96 at a given time
# sample.
# Based on the same reasoning, the expected value for the :term:`global field
# power (GFP) <GFP>` is 1 (calculation of the GFP should take into account the
# true degrees of freedom, e.g. ``ddof=3`` with 2 active SSP vectors):
evoked = epochs.average()
evoked.plot_white(noise_cov_reg, time_unit='s')
###############################################################################
# This plot displays both the whitened evoked signals for each channel and
# the whitened :term:`GFP`. The numbers in the GFP panel represent the
# estimated rank of the data, which amounts to the effective degrees of freedom
# by which the squared sum across sensors is divided when computing the
# whitened :term:`GFP`. The whitened :term:`GFP` also helps detecting spurious
# late evoked components which can be the consequence of over- or
# under-regularization.
#
# Note that if data have been processed using signal space separation
# (SSS) :footcite:`TauluEtAl2005`,
# gradiometers and magnetometers will be displayed jointly because both are
# reconstructed from the same SSS basis vectors with the same numerical rank.
# This also implies that the two sensor types are no longer statistically
# independent.
# These methods for evaluation can be used to assess model violations.
# Additional
# introductory materials can be found `here <https://goo.gl/ElWrxe>`_.
#
# For expert use cases or debugging the alternative estimators can also be
# compared (see
# :ref:`sphx_glr_auto_examples_visualization_plot_evoked_whitening.py`) and
# :ref:`sphx_glr_auto_examples_inverse_plot_covariance_whitening_dspm.py`):
noise_covs = mne.compute_covariance(
epochs, tmax=0., method=('empirical', 'shrunk'), return_estimators=True,
rank=None)
evoked.plot_white(noise_covs, time_unit='s')
##############################################################################
# This will plot the whitened evoked for the optimal estimator and display the
# :term:`GFP` for all estimators as separate lines in the related panel.
##############################################################################
# Finally, let's have a look at the difference between empty room and
# event related covariance, hacking the "method" option so that their types
# are shown in the legend of the plot.
evoked_meg = evoked.copy().pick('meg')
noise_cov['method'] = 'empty_room'
noise_cov_baseline['method'] = 'baseline'
evoked_meg.plot_white([noise_cov_baseline, noise_cov], time_unit='s')
##############################################################################
# Based on the negative log-likelihood, the baseline covariance
# seems more appropriate. See :ref:`ex-covariance-whitening-dspm` for more
# information.
###############################################################################
# References
# ----------
#
# .. footbibliography::
| bsd-3-clause | -342,805,886,594,376,450 | 46.456522 | 79 | 0.658383 | false |
tsdgeos/snapcraft | external_snaps_tests/__main__.py | 5 | 3360 |
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Snapcraft external snaps tests.
This will clone the external repository, search for snapcraft.yaml files and
snap the packages.
Usage:
external_snaps_tests REPO_URL [--repo-branch BRANCH]
[--cleanbuild] [--keep-dir]
Arguments:
REPO_URL The URL of the repository to build.
Options:
  --repo-branch The name of the branch to build. If not given, the default
                branch of the repository is built.
--cleanbuild Build the snaps in a clean LXC container.
--keep-dir Do not remove the temporary directory where the repository was
cloned and snapped.
"""
import os
import shutil
import subprocess
import sys
import tempfile
import docopt
def main():
arguments = docopt.docopt(__doc__)
repo = arguments['REPO_URL']
repo_branch = arguments['--repo-branch']
cleanbuild = arguments['--cleanbuild']
keep_dir = arguments['--keep-dir']
if _is_git(repo):
if shutil.which('git'):
path = _git_clone(repo, repo_branch)
_build_snaps(path, cleanbuild, keep_dir)
else:
sys.exit('Please install git.')
else:
sys.exit('Unsupported repository.')
def _is_git(repo):
return (repo.startswith('https://github.com/') or
repo.startswith('git://') or
repo.startswith('https://git.launchpad.net/'))
def _git_clone(url, repo_branch=None):
temp_dir = tempfile.mkdtemp(prefix='snapcraft-')
command = ['git', 'clone', url, temp_dir]
print(' '.join(command))
subprocess.check_call(command)
if repo_branch:
subprocess.check_call(['git', 'checkout', repo_branch], cwd=temp_dir)
return temp_dir
def _build_snaps(path, cleanbuild=False, keep_dir=False):
try:
for dirpath, _, filenames in os.walk(path):
if 'snapcraft.yaml' in filenames or '.snapcraft.yaml' in filenames:
_build_snap(dirpath, cleanbuild, keep_dir)
except subprocess.CalledProcessError as e:
sys.exit(e.returncode)
finally:
if keep_dir:
print(
'You can inspect the built project repository in {}'.format(
path))
else:
shutil.rmtree(path)
def _build_snap(path, cleanbuild=False, keep_dir=False):
snapcraft = os.path.abspath(os.path.join('bin', 'snapcraft'))
print('Updating the parts cache...')
subprocess.check_call([snapcraft, 'update'])
print('Snapping {}'.format(path))
command = [snapcraft, '-d']
if cleanbuild:
command.append('cleanbuild')
print(' '.join(command))
subprocess.check_call(command, cwd=path)
if __name__ == '__main__':
main()
| gpl-3.0 | -8,513,185,958,405,063,000 | 30.111111 | 79 | 0.647024 | false |
openfun/edx-platform | lms/djangoapps/instructor/hint_manager.py | 110 | 11466 |
"""
Views for hint management.
Get to these views through courseurl/hint_manager.
For example: https://courses.edx.org/courses/MITx/2.01x/2013_Spring/hint_manager
These views will only be visible if FEATURES['ENABLE_HINTER_INSTRUCTOR_VIEW'] = True
"""
import json
import re
from django.http import HttpResponse, Http404
from django.views.decorators.csrf import ensure_csrf_cookie
from edxmako.shortcuts import render_to_response, render_to_string
from courseware.courses import get_course_with_access
from courseware.models import XModuleUserStateSummaryField
import courseware.module_render as module_render
import courseware.model_data as model_data
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.exceptions import ItemNotFoundError
@ensure_csrf_cookie
def hint_manager(request, course_id):
"""
The URL landing function for all calls to the hint manager, both POST and GET.
"""
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
try:
course = get_course_with_access(request.user, 'staff', course_key, depth=None)
except Http404:
out = 'Sorry, but students are not allowed to access the hint manager!'
return HttpResponse(out)
if request.method == 'GET':
out = get_hints(request, course_key, 'mod_queue')
out.update({'error': ''})
return render_to_response('instructor/hint_manager.html', out)
field = request.POST['field']
if not (field == 'mod_queue' or field == 'hints'):
# Invalid field. (Don't let users continue - they may overwrite other db's)
out = 'Error in hint manager - an invalid field was accessed.'
return HttpResponse(out)
switch_dict = {
'delete hints': delete_hints,
'switch fields': lambda *args: None, # Takes any number of arguments, returns None.
'change votes': change_votes,
'add hint': add_hint,
'approve': approve,
}
# Do the operation requested, and collect any error messages.
error_text = switch_dict[request.POST['op']](request, course_key, field)
if error_text is None:
error_text = ''
render_dict = get_hints(request, course_key, field, course=course)
render_dict.update({'error': error_text})
rendered_html = render_to_string('instructor/hint_manager_inner.html', render_dict)
return HttpResponse(json.dumps({'success': True, 'contents': rendered_html}))
def get_hints(request, course_id, field, course=None): # pylint: disable=unused-argument
"""
Load all of the hints submitted to the course.
Args:
`request` -- Django request object.
`course_id` -- The course id, like 'Me/19.002/test_course'
`field` -- Either 'hints' or 'mod_queue'; specifies which set of hints to load.
Keys in returned dict:
- 'field': Same as input
- 'other_field': 'mod_queue' if `field` == 'hints'; and vice-versa.
- 'field_label', 'other_field_label': English name for the above.
- 'all_hints': A list of [answer, pk dict] pairs, representing all hints.
Sorted by answer.
- 'id_to_name': A dictionary mapping problem id to problem name.
"""
if field == 'mod_queue':
other_field = 'hints'
field_label = 'Hints Awaiting Moderation'
other_field_label = 'Approved Hints'
elif field == 'hints':
other_field = 'mod_queue'
field_label = 'Approved Hints'
other_field_label = 'Hints Awaiting Moderation'
# We want to use the course_id to find all matching usage_id's.
# To do this, just take the school/number part - leave off the classname.
# FIXME: we need to figure out how to do this with opaque keys
all_hints = XModuleUserStateSummaryField.objects.filter(
field_name=field,
usage_id__regex=re.escape(u'{0.org}/{0.course}'.format(course_id)),
)
# big_out_dict[problem id] = [[answer, {pk: [hint, votes]}], sorted by answer]
# big_out_dict maps a problem id to a list of [answer, hints] pairs, sorted in order of answer.
big_out_dict = {}
# id_to name maps a problem id to the name of the problem.
# id_to_name[problem id] = Display name of problem
id_to_name = {}
for hints_by_problem in all_hints:
hints_by_problem.usage_id = hints_by_problem.usage_id.map_into_course(course_id)
name = location_to_problem_name(course_id, hints_by_problem.usage_id)
if name is None:
continue
id_to_name[hints_by_problem.usage_id] = name
def answer_sorter(thing):
"""
`thing` is a tuple, where `thing[0]` contains an answer, and `thing[1]` contains
a dict of hints. This function returns an index based on `thing[0]`, which
is used as a key to sort the list of things.
"""
try:
return float(thing[0])
except ValueError:
# Put all non-numerical answers first.
return float('-inf')
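        # For example (illustrative): answer_sorter(("3.5", {})) returns 3.5,
        # while a non-numeric answer such as ("cat", {}) returns float('-inf').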
# Answer list contains [answer, dict_of_hints] pairs.
answer_list = sorted(json.loads(hints_by_problem.value).items(), key=answer_sorter)
big_out_dict[hints_by_problem.usage_id] = answer_list
render_dict = {'field': field,
'other_field': other_field,
'field_label': field_label,
'other_field_label': other_field_label,
'all_hints': big_out_dict,
'id_to_name': id_to_name}
return render_dict
def location_to_problem_name(course_id, loc):
"""
Given the location of a crowdsource_hinter module, try to return the name of the
problem it wraps around. Return None if the hinter no longer exists.
"""
try:
descriptor = modulestore().get_item(loc)
return descriptor.get_children()[0].display_name
except ItemNotFoundError:
# Sometimes, the problem is no longer in the course. Just
# don't include said problem.
return None
def delete_hints(request, course_id, field, course=None): # pylint: disable=unused-argument
"""
Deletes the hints specified.
`request.POST` contains some fields keyed by integers. Each such field contains a
[problem_defn_id, answer, pk] tuple. These tuples specify the hints to be deleted.
Example `request.POST`:
{'op': 'delete_hints',
'field': 'mod_queue',
1: ['problem_whatever', '42.0', '3'],
2: ['problem_whatever', '32.5', '12']}
"""
for key in request.POST:
if key == 'op' or key == 'field':
continue
problem_id, answer, pk = request.POST.getlist(key)
problem_key = course_id.make_usage_key_from_deprecated_string(problem_id)
# Can be optimized - sort the delete list by problem_id, and load each problem
# from the database only once.
this_problem = XModuleUserStateSummaryField.objects.get(field_name=field, usage_id=problem_key)
problem_dict = json.loads(this_problem.value)
del problem_dict[answer][pk]
this_problem.value = json.dumps(problem_dict)
this_problem.save()
def change_votes(request, course_id, field, course=None): # pylint: disable=unused-argument
"""
Updates the number of votes.
The numbered fields of `request.POST` contain [problem_id, answer, pk, new_votes] tuples.
See `delete_hints`.
Example `request.POST`:
{'op': 'delete_hints',
'field': 'mod_queue',
1: ['problem_whatever', '42.0', '3', 42],
2: ['problem_whatever', '32.5', '12', 9001]}
"""
for key in request.POST:
if key == 'op' or key == 'field':
continue
problem_id, answer, pk, new_votes = request.POST.getlist(key)
problem_key = course_id.make_usage_key_from_deprecated_string(problem_id)
this_problem = XModuleUserStateSummaryField.objects.get(field_name=field, usage_id=problem_key)
problem_dict = json.loads(this_problem.value)
# problem_dict[answer][pk] points to a [hint_text, #votes] pair.
problem_dict[answer][pk][1] = int(new_votes)
this_problem.value = json.dumps(problem_dict)
this_problem.save()
def add_hint(request, course_id, field, course=None):
"""
Add a new hint. `request.POST`:
op
field
problem - The problem id
answer - The answer to which a hint will be added
hint - The text of the hint
"""
problem_id = request.POST['problem']
problem_key = course_id.make_usage_key_from_deprecated_string(problem_id)
answer = request.POST['answer']
hint_text = request.POST['hint']
# Validate the answer. This requires initializing the xmodules, which
# is annoying.
try:
descriptor = modulestore().get_item(problem_key)
descriptors = [descriptor]
except ItemNotFoundError:
descriptors = []
field_data_cache = model_data.FieldDataCache(descriptors, course_id, request.user)
hinter_module = module_render.get_module(
request.user,
request,
problem_key,
field_data_cache,
course_id,
course=course
)
if not hinter_module.validate_answer(answer):
# Invalid answer. Don't add it to the database, or else the
# hinter will crash when we encounter it.
return 'Error - the answer you specified is not properly formatted: ' + str(answer)
this_problem = XModuleUserStateSummaryField.objects.get(field_name=field, usage_id=problem_key)
hint_pk_entry = XModuleUserStateSummaryField.objects.get(field_name='hint_pk', usage_id=problem_key)
this_pk = int(hint_pk_entry.value)
hint_pk_entry.value = this_pk + 1
hint_pk_entry.save()
problem_dict = json.loads(this_problem.value)
if answer not in problem_dict:
problem_dict[answer] = {}
problem_dict[answer][this_pk] = [hint_text, 1]
this_problem.value = json.dumps(problem_dict)
this_problem.save()
def approve(request, course_id, field, course=None): # pylint: disable=unused-argument
"""
Approve a list of hints, moving them from the mod_queue to the real
hint list. POST:
op, field
(some number) -> [problem, answer, pk]
The numbered fields are analogous to those in `delete_hints` and `change_votes`.
"""
for key in request.POST:
if key == 'op' or key == 'field':
continue
problem_id, answer, pk = request.POST.getlist(key)
problem_key = course_id.make_usage_key_from_deprecated_string(problem_id)
# Can be optimized - sort the delete list by problem_id, and load each problem
# from the database only once.
problem_in_mod = XModuleUserStateSummaryField.objects.get(field_name=field, usage_id=problem_key)
problem_dict = json.loads(problem_in_mod.value)
hint_to_move = problem_dict[answer][pk]
del problem_dict[answer][pk]
problem_in_mod.value = json.dumps(problem_dict)
problem_in_mod.save()
problem_in_hints = XModuleUserStateSummaryField.objects.get(field_name='hints', usage_id=problem_key)
problem_dict = json.loads(problem_in_hints.value)
if answer not in problem_dict:
problem_dict[answer] = {}
problem_dict[answer][pk] = hint_to_move
problem_in_hints.value = json.dumps(problem_dict)
problem_in_hints.save()
| agpl-3.0 | -2,290,158,809,397,382,000 | 38.95122 | 109 | 0.647654 | false |
reshama/data-science-from-scratch | code/getting_data.py | 60 | 6317 |
from __future__ import division
from collections import Counter
import math, random, re, csv, json  # re is needed by book_info below
from bs4 import BeautifulSoup
import requests
######
#
# BOOKS ABOUT DATA
#
######
def is_video(td):
"""it's a video if it has exactly one pricelabel, and if
the stripped text inside that pricelabel starts with 'Video'"""
pricelabels = td('span', 'pricelabel')
return (len(pricelabels) == 1 and
pricelabels[0].text.strip().startswith("Video"))
def book_info(td):
"""given a BeautifulSoup <td> Tag representing a book,
extract the book's details and return a dict"""
title = td.find("div", "thumbheader").a.text
by_author = td.find('div', 'AuthorName').text
authors = [x.strip() for x in re.sub("^By ", "", by_author).split(",")]
isbn_link = td.find("div", "thumbheader").a.get("href")
isbn = re.match("/product/(.*)\.do", isbn_link).groups()[0]
date = td.find("span", "directorydate").text.strip()
return {
"title" : title,
"authors" : authors,
"isbn" : isbn,
"date" : date
}
from time import sleep
def scrape(num_pages=31):
base_url = "http://shop.oreilly.com/category/browse-subjects/" + \
"data.do?sortby=publicationDate&page="
books = []
for page_num in range(1, num_pages + 1):
print "souping page", page_num
url = base_url + str(page_num)
soup = BeautifulSoup(requests.get(url).text, 'html5lib')
for td in soup('td', 'thumbtext'):
if not is_video(td):
books.append(book_info(td))
# now be a good citizen and respect the robots.txt!
sleep(30)
return books
def get_year(book):
"""book["date"] looks like 'November 2014' so we need to
split on the space and then take the second piece"""
return int(book["date"].split()[1])
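# For example (illustrative): get_year({"date": "November 2014"}) == 2014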
def plot_years(plt, books):
# 2014 is the last complete year of data (when I ran this)
year_counts = Counter(get_year(book) for book in books
if get_year(book) <= 2014)
years = sorted(year_counts)
    book_counts = [year_counts[year] for year in years]
plt.bar([x - 0.5 for x in years], book_counts)
plt.xlabel("year")
plt.ylabel("# of data books")
plt.title("Data is Big!")
plt.show()
##
#
# APIs
#
##
endpoint = "https://api.github.com/users/joelgrus/repos"
repos = json.loads(requests.get(endpoint).text)
from dateutil.parser import parse
dates = [parse(repo["created_at"]) for repo in repos]
month_counts = Counter(date.month for date in dates)
weekday_counts = Counter(date.weekday() for date in dates)
####
#
# Twitter
#
####
from twython import Twython
# fill these in if you want to use the code
CONSUMER_KEY = ""
CONSUMER_SECRET = ""
ACCESS_TOKEN = ""
ACCESS_TOKEN_SECRET = ""
def call_twitter_search_api():
twitter = Twython(CONSUMER_KEY, CONSUMER_SECRET)
# search for tweets containing the phrase "data science"
for status in twitter.search(q='"data science"')["statuses"]:
user = status["user"]["screen_name"].encode('utf-8')
text = status["text"].encode('utf-8')
print user, ":", text
print
from twython import TwythonStreamer
# appending data to a global variable is pretty poor form
# but it makes the example much simpler
tweets = []
class MyStreamer(TwythonStreamer):
"""our own subclass of TwythonStreamer that specifies
how to interact with the stream"""
def on_success(self, data):
"""what do we do when twitter sends us data?
here data will be a Python object representing a tweet"""
# only want to collect English-language tweets
if data['lang'] == 'en':
tweets.append(data)
# stop when we've collected enough
if len(tweets) >= 1000:
self.disconnect()
def on_error(self, status_code, data):
print status_code, data
self.disconnect()
def call_twitter_streaming_api():
stream = MyStreamer(CONSUMER_KEY, CONSUMER_SECRET,
ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
# starts consuming public statuses that contain the keyword 'data'
stream.statuses.filter(track='data')
if __name__ == "__main__":
def process(date, symbol, price):
print date, symbol, price
print "tab delimited stock prices:"
with open('tab_delimited_stock_prices.txt', 'rb') as f:
reader = csv.reader(f, delimiter='\t')
for row in reader:
date = row[0]
symbol = row[1]
closing_price = float(row[2])
process(date, symbol, closing_price)
print
print "colon delimited stock prices:"
with open('colon_delimited_stock_prices.txt', 'rb') as f:
reader = csv.DictReader(f, delimiter=':')
for row in reader:
date = row["date"]
symbol = row["symbol"]
closing_price = float(row["closing_price"])
process(date, symbol, closing_price)
print
print "writing out comma_delimited_stock_prices.txt"
today_prices = { 'AAPL' : 90.91, 'MSFT' : 41.68, 'FB' : 64.5 }
with open('comma_delimited_stock_prices.txt','wb') as f:
writer = csv.writer(f, delimiter=',')
for stock, price in today_prices.items():
writer.writerow([stock, price])
print "BeautifulSoup"
html = requests.get("http://www.example.com").text
soup = BeautifulSoup(html)
print soup
print
print "parsing json"
serialized = """{ "title" : "Data Science Book",
"author" : "Joel Grus",
"publicationYear" : 2014,
"topics" : [ "data", "science", "data science"] }"""
# parse the JSON to create a Python object
deserialized = json.loads(serialized)
if "data science" in deserialized["topics"]:
print deserialized
print
print "GitHub API"
print "dates", dates
print "month_counts", month_counts
print "weekday_count", weekday_counts
last_5_repositories = sorted(repos,
key=lambda r: r["created_at"],
reverse=True)[:5]
print "last five languages", [repo["language"]
for repo in last_5_repositories]
| unlicense | 8,779,195,072,058,586,000 | 27.327354 | 75 | 0.601551 | false |
Anonymouslemming/ansible | lib/ansible/modules/network/nxos/nxos_vrf_af.py | 37 | 7795 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {
'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: nxos_vrf_af
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages VRF AF.
description:
- Manages VRF AF
author: Gabriele Gerbino (@GGabriele)
notes:
- Default, where supported, restores params default value.
options:
vrf:
description:
- Name of the VRF.
required: true
afi:
description:
- Address-Family Identifier (AFI).
required: true
choices: ['ipv4', 'ipv6']
default: null
safi:
description:
- Sub Address-Family Identifier (SAFI).
required: true
choices: ['unicast', 'multicast']
default: null
route_target_both_auto_evpn:
description:
- Enable/Disable the EVPN route-target 'auto' setting for both
import and export target communities.
required: false
choices: ['true', 'false']
default: null
state:
description:
- Determines whether the config should be present or
not on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_vrf_af:
vrf: ntc
afi: ipv4
safi: unicast
route_target_both_auto_evpn: True
state: present
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["vrf context ntc", "address-family ipv4 unicast",
"afi ipv4", "route-target both auto evpn", "vrf ntc",
"safi unicast"]
'''
import re
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
BOOL_PARAMS = ['route_target_both_auto_evpn']
PARAM_TO_COMMAND_KEYMAP = {
'vrf': 'vrf',
'safi': 'safi',
'afi': 'afi',
'route_target_both_auto_evpn': 'route-target both auto evpn'
}
PARAM_TO_DEFAULT_KEYMAP = {}
def get_value(arg, config, module):
command = PARAM_TO_COMMAND_KEYMAP.get(arg)
if arg in BOOL_PARAMS:
command_re = re.compile(r'\s+{0}\s*$'.format(command), re.M)
value = False
try:
if command_re.search(config):
value = True
except TypeError:
value = False
else:
command_re = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(command), re.M)
value = ''
if command in config:
value = command_re.search(config).group('value')
return value
def get_existing(module, args):
existing = {}
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
parents = ['vrf context {0}'.format(module.params['vrf'])]
parents.append('address-family {0} {1}'.format(module.params['afi'],
module.params['safi']))
config = netcfg.get_section(parents)
if config:
splitted_config = config.splitlines()
vrf_index = False
for index in range(0, len(splitted_config) - 1):
if 'vrf' in splitted_config[index].strip():
vrf_index = index
break
if vrf_index:
config = '\n'.join(splitted_config[0:vrf_index])
for arg in args:
if arg not in ['afi', 'safi', 'vrf']:
existing[arg] = get_value(arg, config, module)
existing['afi'] = module.params['afi']
existing['safi'] = module.params['safi']
existing['vrf'] = module.params['vrf']
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key in table:
new_key = key_map.get(key)
if new_key:
new_dict[new_key] = table.get(key)
return new_dict
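# For example (illustrative only): with the PARAM_TO_COMMAND_KEYMAP above,
# apply_key_map(PARAM_TO_COMMAND_KEYMAP, {'vrf': 'ntc', 'state': 'present'})
# returns {'vrf': 'ntc'} -- keys that are missing from the map are dropped.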
def state_present(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.items():
if value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
elif value == 'default':
if existing_commands.get(key):
existing_value = existing_commands.get(key)
commands.append('no {0} {1}'.format(key, existing_value))
else:
command = '{0} {1}'.format(key, value.lower())
commands.append(command)
if commands:
parents = ['vrf context {0}'.format(module.params['vrf'])]
parents.append('address-family {0} {1}'.format(module.params['afi'],
module.params['safi']))
candidate.add(commands, parents=parents)
def state_absent(module, existing, proposed, candidate):
commands = []
parents = ['vrf context {0}'.format(module.params['vrf'])]
commands.append('no address-family {0} {1}'.format(module.params['afi'],
module.params['safi']))
candidate.add(commands, parents=parents)
def main():
argument_spec = dict(
vrf=dict(required=True, type='str'),
safi=dict(required=True, type='str', choices=['unicast', 'multicast']),
afi=dict(required=True, type='str', choices=['ipv4', 'ipv6']),
route_target_both_auto_evpn=dict(required=False, type='bool'),
m_facts=dict(required=False, default=False, type='bool'),
state=dict(choices=['present', 'absent'], default='present', required=False),
include_defaults=dict(default=False),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
state = module.params['state']
args = PARAM_TO_COMMAND_KEYMAP.keys()
existing = get_existing(module, args)
proposed_args = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.items():
if key != 'interface':
if str(value).lower() == 'default':
value = PARAM_TO_DEFAULT_KEYMAP.get(key)
if value is None:
value = 'default'
if existing.get(key) != value:
proposed[key] = value
candidate = CustomNetworkConfig(indent=3)
if state == 'present':
state_present(module, existing, proposed, candidate)
elif state == 'absent' and existing:
state_absent(module, existing, proposed, candidate)
if candidate:
load_config(module, candidate)
result['changed'] = True
result['commands'] = candidate.items_text()
else:
result['commands'] = []
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 9,171,238,911,959,784,000 | 30.055777 | 85 | 0.61052 | false |
jmartinm/invenio | modules/bibformat/lib/elements/bfe_webauthorpage_data.py | 18 | 1333 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints authors
"""
from cgi import escape
from invenio.webauthorprofile_config import serialize
def format_element(bfo):
"""
Return list of profile data.
"""
data_dict = {}
year_fields = map(bfo.fields, ['260__c', '269__c', '773__y', '502__d'])
recid = bfo.recID
data_dict['year_fields'] = year_fields
return serialize([recid, data_dict])
def escape_values(bfo):
"""
Called by BibFormat in order to check if output of this element
should be escaped.
"""
return 0
| gpl-2.0 | 1,376,026,148,586,453,800 | 30 | 75 | 0.691673 | false |
cwadding/sensit-python | sensit/api/publication.py | 1 | 2940 |
# Publications are stored actions which are taken when a feed is created, updated, deleted, or there is a matching percolator query.
#
# topic_id - The key for the parent topic
# id - The identifier of the publication
class Publication():
def __init__(self, topic_id, id, client):
self.topic_id = topic_id
self.id = id
self.client = client
# Get all publications for the associated Topic. Requires authorization of **read_any_publications**, or **read_application_publications**.
# '/api/topics/:topic_id/publications' GET
#
def list(self, options = {}):
body = options['query'] if 'query' in options else {}
response = self.client.get('/api/topics/' + self.topic_id + '/publications', body, options)
return response
# Retrieve a specific publication on the associated topic by Id. Requires authorization of **read_any_publications**, or **read_application_publications**.
# '/api/topics/:topic_id/publications/:id' GET
#
def find(self, options = {}):
body = options['query'] if 'query' in options else {}
response = self.client.get('/api/topics/' + self.topic_id + '/publications/' + self.id + '', body, options)
return response
# Create a new publication on the associated Topic which can be easily retrieved later using an id. Requires authorization of **manage_any_publications**, or **manage_application_publications**.
# '/api/topics/:topic_id/publications' POST
#
# publication - A Hash containing `host`:The ip address or host of the connection(required).`protocol`:the protocol to communicate over (http, tcp, udp, mqtt) (required)`port`:The port of the connection.
def create(self, publication, options = {}):
body = options['body'] if 'body' in options else {}
body['publication'] = publication
response = self.client.post('/api/topics/' + self.topic_id + '/publications', body, options)
return response
# Update a publication. Requires authorization of **manage_any_publications**, or **manage_application_publications**.
# '/api/topics/:topic_id/publications/:id' PUT
#
# publication - A Hash containing `host`:The ip address or host of the connection(required).`protocol`:the protocol to communicate over (http, tcp, udp, mqtt) (required)`port`:The port of the connection.
def update(self, publication, options = {}):
body = options['body'] if 'body' in options else {}
body['publication'] = publication
response = self.client.put('/api/topics/' + self.topic_id + '/publications/' + self.id + '', body, options)
return response
# Remove a saved publication on the associated Topic by Id. Requires authorization of **manage_any_publications**, or **manage_application_publications**.
# '/api/topics/:topic_id/publications/:id' DELETE
#
def delete(self, options = {}):
body = options['body'] if 'body' in options else {}
response = self.client.delete('/api/topics/' + self.topic_id + '/publications/' + self.id + '', body, options)
return response
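# A minimal usage sketch (hypothetical topic and publication ids; the `client`
# object is assumed to come from the sensit API wrapper):
#   publication = Publication('my-topic', '42', client)
#   response = publication.list()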
| mit | 2,570,357,287,248,789,000 | 44.230769 | 204 | 0.709184 | false |
sysalexis/kbengine | kbe/src/lib/python/Lib/test/sortperf.py | 92 | 4805 |
"""Sort performance test.
See main() for command line syntax.
See tabulate() for output format.
"""
import sys
import time
import random
import marshal
import tempfile
import os
td = tempfile.gettempdir()
def randfloats(n):
"""Return a list of n random floats in [0, 1)."""
# Generating floats is expensive, so this writes them out to a file in
# a temp directory. If the file already exists, it just reads them
# back in and shuffles them a bit.
fn = os.path.join(td, "rr%06d" % n)
try:
fp = open(fn, "rb")
except OSError:
r = random.random
result = [r() for i in range(n)]
try:
try:
fp = open(fn, "wb")
marshal.dump(result, fp)
fp.close()
fp = None
finally:
if fp:
try:
os.unlink(fn)
except OSError:
pass
except OSError as msg:
print("can't write", fn, ":", msg)
else:
result = marshal.load(fp)
fp.close()
# Shuffle it a bit...
for i in range(10):
i = random.randrange(n)
temp = result[:i]
del result[:i]
temp.reverse()
result.extend(temp)
del temp
assert len(result) == n
return result
def flush():
sys.stdout.flush()
def doit(L):
t0 = time.perf_counter()
L.sort()
t1 = time.perf_counter()
print("%6.2f" % (t1-t0), end=' ')
flush()
def tabulate(r):
"""Tabulate sort speed for lists of various sizes.
The sizes are 2**i for i in r (the argument, a list).
The output displays i, 2**i, and the time to sort arrays of 2**i
floating point numbers with the following properties:
*sort: random data
\sort: descending data
/sort: ascending data
3sort: ascending, then 3 random exchanges
+sort: ascending, then 10 random at the end
%sort: ascending, then randomly replace 1% of the elements w/ random values
~sort: many duplicates
=sort: all equal
!sort: worst case scenario
"""
cases = tuple([ch + "sort" for ch in r"*\/3+%~=!"])
fmt = ("%2s %7s" + " %6s"*len(cases))
print(fmt % (("i", "2**i") + cases))
for i in r:
n = 1 << i
L = randfloats(n)
print("%2d %7d" % (i, n), end=' ')
flush()
doit(L) # *sort
L.reverse()
doit(L) # \sort
doit(L) # /sort
# Do 3 random exchanges.
for dummy in range(3):
i1 = random.randrange(n)
i2 = random.randrange(n)
L[i1], L[i2] = L[i2], L[i1]
doit(L) # 3sort
# Replace the last 10 with random floats.
if n >= 10:
L[-10:] = [random.random() for dummy in range(10)]
doit(L) # +sort
# Replace 1% of the elements at random.
for dummy in range(n // 100):
L[random.randrange(n)] = random.random()
doit(L) # %sort
# Arrange for lots of duplicates.
if n > 4:
del L[4:]
L = L * (n // 4)
# Force the elements to be distinct objects, else timings can be
# artificially low.
L = list(map(lambda x: --x, L))
doit(L) # ~sort
del L
# All equal. Again, force the elements to be distinct objects.
L = list(map(abs, [-0.5] * n))
doit(L) # =sort
del L
# This one looks like [3, 2, 1, 0, 0, 1, 2, 3]. It was a bad case
# for an older implementation of quicksort, which used the median
# of the first, last and middle elements as the pivot.
half = n // 2
L = list(range(half - 1, -1, -1))
L.extend(range(half))
# Force to float, so that the timings are comparable. This is
# significantly faster if we leave tham as ints.
L = list(map(float, L))
doit(L) # !sort
print()
def main():
"""Main program when invoked as a script.
One argument: tabulate a single row.
Two arguments: tabulate a range (inclusive).
Extra arguments are used to seed the random generator.
"""
# default range (inclusive)
k1 = 15
k2 = 20
if sys.argv[1:]:
# one argument: single point
k1 = k2 = int(sys.argv[1])
if sys.argv[2:]:
# two arguments: specify range
k2 = int(sys.argv[2])
if sys.argv[3:]:
# derive random seed from remaining arguments
x = 1
for a in sys.argv[3:]:
x = 69069 * x + hash(a)
random.seed(x)
r = range(k1, k2+1) # include the end point
tabulate(r)
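# For example (hypothetical invocation): "python sortperf.py 15 20 seed" tabulates
# list sizes 2**15 through 2**20 and seeds the random generator with "seed".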
if __name__ == '__main__':
main()
| lgpl-3.0 | 5,491,446,564,412,256,000 | 27.431953 | 79 | 0.515921 | false |
kswiat/django | django/utils/module_loading.py | 18 | 6640 |
from __future__ import absolute_import  # Avoid importing `importlib` from this package.
import copy
from importlib import import_module
import os
import sys
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
def import_string(dotted_path):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImportError if the import failed.
"""
try:
module_path, class_name = dotted_path.rsplit('.', 1)
except ValueError:
msg = "%s doesn't look like a module path" % dotted_path
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
module = import_module(module_path)
try:
return getattr(module, class_name)
except AttributeError:
msg = 'Module "%s" does not define a "%s" attribute/class' % (
dotted_path, class_name)
six.reraise(ImportError, ImportError(msg), sys.exc_info()[2])
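# For example (illustrative only):
#   validate_email = import_string('django.core.validators.validate_email')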
def import_by_path(dotted_path, error_prefix=''):
"""
Import a dotted module path and return the attribute/class designated by the
last name in the path. Raise ImproperlyConfigured if something goes wrong.
"""
warnings.warn(
'import_by_path() has been deprecated. Use import_string() instead.',
RemovedInDjango19Warning, stacklevel=2)
try:
attr = import_string(dotted_path)
except ImportError as e:
msg = '%sError importing module %s: "%s"' % (
error_prefix, dotted_path, e)
six.reraise(ImproperlyConfigured, ImproperlyConfigured(msg),
sys.exc_info()[2])
return attr
def autodiscover_modules(*args, **kwargs):
"""
Auto-discover INSTALLED_APPS modules and fail silently when
not present. This forces an import on them to register any admin bits they
may want.
You may provide a register_to keyword parameter as a way to access a
registry. This register_to object must have a _registry instance variable
to access it.
"""
from django.apps import apps
register_to = kwargs.get('register_to')
for app_config in apps.get_app_configs():
# Attempt to import the app's module.
try:
if register_to:
before_import_registry = copy.copy(register_to._registry)
for module_to_search in args:
import_module('%s.%s' % (app_config.name, module_to_search))
except:
# Reset the model registry to the state before the last import as
# this import will have to reoccur on the next request and this
# could raise NotRegistered and AlreadyRegistered exceptions
# (see #8245).
if register_to:
register_to._registry = before_import_registry
# Decide whether to bubble up this error. If the app just
# doesn't have an admin module, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(app_config.module, module_to_search):
raise
if sys.version_info[:2] >= (3, 3):
if sys.version_info[:2] >= (3, 4):
from importlib.util import find_spec as importlib_find
else:
from importlib import find_loader as importlib_find
def module_has_submodule(package, module_name):
"""See if 'module' is in 'package'."""
try:
package_name = package.__name__
package_path = package.__path__
except AttributeError:
# package isn't a package.
return False
full_module_name = package_name + '.' + module_name
return importlib_find(full_module_name, package_path) is not None
else:
import imp
def module_has_submodule(package, module_name):
"""See if 'module' is in 'package'."""
name = ".".join([package.__name__, module_name])
try:
# None indicates a cached miss; see mark_miss() in Python/import.c.
return sys.modules[name] is not None
except KeyError:
pass
try:
package_path = package.__path__ # No __path__, then not a package.
except AttributeError:
# Since the remainder of this function assumes that we're dealing with
# a package (module with a __path__), so if it's not, then bail here.
return False
for finder in sys.meta_path:
if finder.find_module(name, package_path):
return True
for entry in package_path:
try:
# Try the cached finder.
finder = sys.path_importer_cache[entry]
if finder is None:
# Implicit import machinery should be used.
try:
file_, _, _ = imp.find_module(module_name, [entry])
if file_:
file_.close()
return True
except ImportError:
continue
# Else see if the finder knows of a loader.
elif finder.find_module(name):
return True
else:
continue
except KeyError:
# No cached finder, so try and make one.
for hook in sys.path_hooks:
try:
finder = hook(entry)
# XXX Could cache in sys.path_importer_cache
if finder.find_module(name):
return True
else:
# Once a finder is found, stop the search.
break
except ImportError:
# Continue the search for a finder.
continue
else:
# No finder found.
# Try the implicit import machinery if searching a directory.
if os.path.isdir(entry):
try:
file_, _, _ = imp.find_module(module_name, [entry])
if file_:
file_.close()
return True
except ImportError:
pass
# XXX Could insert None or NullImporter
else:
# Exhausted the search, so the module cannot be found.
return False
| bsd-3-clause | -8,163,787,533,782,796,000 | 37.381503 | 88 | 0.55497 | false |
paterson/servo | tests/wpt/css-tests/css21_dev/xhtml1/support/fonts/makegsubfonts.py | 820 | 14309 |
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
baseCodepoint = 0xe000
# -------
# Features
# -------
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
# parse
values = line.split("\t")
tag = values.pop(0)
mapping.append(tag);
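# Each non-comment line of gsubtest-features.txt is expected to be tab separated
# with the feature tag first, e.g. (hypothetical line): "liga\tStandard Ligatures";
# only the leading tag is kept in `mapping`.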
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
charString = T2CharString(program=program, private=private, globalSubrs=globalSubrs)
charStringsIndex.append(charString)
glyphID = len(topDict.charset)
charStrings.charStrings[glyphName] = glyphID
topDict.charset.append(glyphName)
def makeLookup1():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
# bump this up so that the sequence is the same as the lookup 3 font
cp += 3
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 1
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = SingleSubst()
subtable.Format = 2
subtable.LookupType = 1
subtable.mapping = {
"%s.pass" % tag : "%s.fail" % tag,
"%s.fail" % tag : "%s.pass" % tag,
}
lookup.SubTable.append(subtable)
path = outputPath % 1 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeLookup3():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
# tag.default
glyphName = "%s.default" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.alt1,2,3
for i in range(1,4):
glyphName = "%s.alt%d" % (tag, i)
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 3
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = AlternateSubst()
subtable.Format = 1
subtable.LookupType = 3
subtable.alternates = {
"%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
"%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
}
lookup.SubTable.append(subtable)
path = outputPath % 3 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeJavascriptData():
features = sorted(mapping)
outStr = []
outStr.append("")
outStr.append("/* This file is autogenerated by makegsubfonts.py */")
outStr.append("")
outStr.append("/* ")
outStr.append(" Features defined in gsubtest fonts with associated base")
outStr.append(" codepoints for each feature:")
outStr.append("")
outStr.append(" cp = codepoint for feature featX")
outStr.append("")
outStr.append(" cp default PASS")
outStr.append(" cp featX=1 FAIL")
outStr.append(" cp featX=2 FAIL")
outStr.append("")
outStr.append(" cp+1 default FAIL")
outStr.append(" cp+1 featX=1 PASS")
outStr.append(" cp+1 featX=2 FAIL")
outStr.append("")
outStr.append(" cp+2 default FAIL")
outStr.append(" cp+2 featX=1 FAIL")
outStr.append(" cp+2 featX=2 PASS")
outStr.append("")
outStr.append("*/")
outStr.append("")
outStr.append("var gFeatures = {");
cp = baseCodepoint
taglist = []
for tag in features:
taglist.append("\"%s\": 0x%x" % (tag, cp))
cp += 4
outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
outStr.append("};");
outStr.append("");
if os.path.exists(javascriptData):
os.remove(javascriptData)
f = open(javascriptData, "wb")
f.write("\n".join(outStr))
f.close()
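# Illustrative example of the generated JavaScript (the tags and the base
# codepoint come from the "mapping" dict defined elsewhere in this script,
# so the exact values below are only an assumption):
#
#   var gFeatures = {
#     "aalt": 0xe000, "abvf": 0xe004, "abvm": 0xe008, ...
#   };
#
# i.e. every feature tag owns a block of four consecutive codepoints.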
# build fonts
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData() | mpl-2.0 | -7,520,309,863,198,824,000 | 28.444444 | 135 | 0.641275 | false |
blacktear23/django | tests/regressiontests/utils/html.py | 86 | 4753 | import unittest
from django.utils import html
class TestUtilsHtml(unittest.TestCase):
def check_output(self, function, value, output=None):
"""
Check that function(value) equals output. If output is None,
check that function(value) equals value.
"""
if output is None:
output = value
self.assertEqual(function(value), output)
def test_escape(self):
f = html.escape
items = (
('&', '&amp;'),
('<', '&lt;'),
('>', '&gt;'),
('"', '&quot;'),
("'", '&#39;'),
)
# Substitution patterns for testing the above items.
patterns = ("%s", "asdf%sfdsa", "%s1", "1%sb")
for value, output in items:
for pattern in patterns:
self.check_output(f, pattern % value, pattern % output)
# Check repeated values.
self.check_output(f, value * 2, output * 2)
# Verify it doesn't double replace &.
self.check_output(f, '<&', '&lt;&amp;')
def test_linebreaks(self):
f = html.linebreaks
items = (
("para1\n\npara2\r\rpara3", "<p>para1</p>\n\n<p>para2</p>\n\n<p>para3</p>"),
("para1\nsub1\rsub2\n\npara2", "<p>para1<br />sub1<br />sub2</p>\n\n<p>para2</p>"),
("para1\r\n\r\npara2\rsub1\r\rpara4", "<p>para1</p>\n\n<p>para2<br />sub1</p>\n\n<p>para4</p>"),
("para1\tmore\n\npara2", "<p>para1\tmore</p>\n\n<p>para2</p>"),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_tags(self):
f = html.strip_tags
items = (
('<adf>a', 'a'),
('</adf>a', 'a'),
('<asdf><asdf>e', 'e'),
('<f', '<f'),
('</fe', '</fe'),
('<x>b<y>', 'b'),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_spaces_between_tags(self):
f = html.strip_spaces_between_tags
# Strings that should come out untouched.
items = (' <adf>', '<adf> ', ' </adf> ', ' <f> x</f>')
for value in items:
self.check_output(f, value)
# Strings that have spaces to strip.
items = (
('<d> </d>', '<d></d>'),
('<p>hello </p>\n<p> world</p>', '<p>hello </p><p> world</p>'),
('\n<p>\t</p>\n<p> </p>\n', '\n<p></p><p></p>\n'),
)
for value, output in items:
self.check_output(f, value, output)
def test_strip_entities(self):
f = html.strip_entities
# Strings that should come out untouched.
values = ("&", "&a", "&a", "a&#a")
for value in values:
self.check_output(f, value)
# Valid entities that should be stripped from the patterns.
entities = ("", "", "&a;", "&fdasdfasdfasdf;")
patterns = (
("asdf %(entity)s ", "asdf "),
("%(entity)s%(entity)s", ""),
("&%(entity)s%(entity)s", "&"),
("%(entity)s3", "3"),
)
for entity in entities:
for in_pattern, output in patterns:
self.check_output(f, in_pattern % {'entity': entity}, output)
def test_fix_ampersands(self):
f = html.fix_ampersands
# Strings without ampersands or with ampersands already encoded.
values = ("a", "b", "&a;", "& &x; ", "asdf")
patterns = (
("%s", "%s"),
("&%s", "&%s"),
("&%s&", "&%s&"),
)
for value in values:
for in_pattern, out_pattern in patterns:
self.check_output(f, in_pattern % value, out_pattern % value)
# Strings with ampersands that need encoding.
items = (
("&#;", "&amp;#;"),
("&#875 ;", "&amp;#875 ;"),
("&#4abc;", "&amp;#4abc;"),
)
for value, output in items:
self.check_output(f, value, output)
def test_escapejs(self):
f = html.escapejs
items = (
(u'"double quotes" and \'single quotes\'', u'\\u0022double quotes\\u0022 and \\u0027single quotes\\u0027'),
(ur'\ : backslashes, too', u'\\u005C : backslashes, too'),
(u'and lots of whitespace: \r\n\t\v\f\b', u'and lots of whitespace: \\u000D\\u000A\\u0009\\u000B\\u000C\\u0008'),
(ur'<script>and this</script>', u'\\u003Cscript\\u003Eand this\\u003C/script\\u003E'),
(u'paragraph separator:\u2029and line separator:\u2028', u'paragraph separator:\\u2029and line separator:\\u2028'),
)
for value, output in items:
self.check_output(f, value, output)
| bsd-3-clause | 6,880,819,676,930,169,000 | 37.642276 | 127 | 0.489375 | false |
Beyond-Imagination/BlubBlub | ChatbotServer/ChatbotEnv/Lib/site-packages/numpy/polynomial/tests/test_laguerre.py | 58 | 17242 | """Tests for laguerre module.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.laguerre as lag
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
TestCase, assert_almost_equal, assert_raises,
assert_equal, assert_, run_module_suite)
L0 = np.array([1])/1
L1 = np.array([1, -1])/1
L2 = np.array([2, -4, 1])/2
L3 = np.array([6, -18, 9, -1])/6
L4 = np.array([24, -96, 72, -16, 1])/24
L5 = np.array([120, -600, 600, -200, 25, -1])/120
L6 = np.array([720, -4320, 5400, -2400, 450, -36, 1])/720
Llist = [L0, L1, L2, L3, L4, L5, L6]
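# Reference note (added, not part of the original test file): L0..L6 above are
# the classical Laguerre polynomials in the monomial basis, lowest-degree
# coefficient first, e.g.
#   L2(x) = (x**2 - 4*x + 2)/2
#   L3(x) = (-x**3 + 9*x**2 - 18*x + 6)/6
# which is what lag.lag2poly([0]*i + [1]) is expected to reproduce below.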
def trim(x):
return lag.lagtrim(x, tol=1e-6)
class TestConstants(TestCase):
def test_lagdomain(self):
assert_equal(lag.lagdomain, [0, 1])
def test_lagzero(self):
assert_equal(lag.lagzero, [0])
def test_lagone(self):
assert_equal(lag.lagone, [1])
def test_lagx(self):
assert_equal(lag.lagx, [1, -1])
class TestArithmetic(TestCase):
x = np.linspace(-3, 3, 100)
def test_lagadd(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = lag.lagadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_lagsub(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = lag.lagsub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_lagmulx(self):
assert_equal(lag.lagmulx([0]), [0])
assert_equal(lag.lagmulx([1]), [1, -1])
for i in range(1, 5):
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [-i, 2*i + 1, -(i + 1)]
assert_almost_equal(lag.lagmulx(ser), tgt)
def test_lagmul(self):
# check values of result
for i in range(5):
pol1 = [0]*i + [1]
val1 = lag.lagval(self.x, pol1)
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
pol2 = [0]*j + [1]
val2 = lag.lagval(self.x, pol2)
pol3 = lag.lagmul(pol1, pol2)
val3 = lag.lagval(self.x, pol3)
assert_(len(pol3) == i + j + 1, msg)
assert_almost_equal(val3, val1*val2, err_msg=msg)
def test_lagdiv(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = lag.lagadd(ci, cj)
quo, rem = lag.lagdiv(tgt, ci)
res = lag.lagadd(lag.lagmul(quo, ci), rem)
assert_almost_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(TestCase):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([9., -14., 6.])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
def test_lagval(self):
#check empty input
assert_equal(lag.lagval([], [1]).size, 0)
#check normal input)
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Llist]
for i in range(7):
msg = "At i=%d" % i
tgt = y[i]
res = lag.lagval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3):
dims = [2]*i
x = np.zeros(dims)
assert_equal(lag.lagval(x, [1]).shape, dims)
assert_equal(lag.lagval(x, [1, 0]).shape, dims)
assert_equal(lag.lagval(x, [1, 0, 0]).shape, dims)
def test_lagval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, lag.lagval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = lag.lagval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = lag.lagval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_lagval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, lag.lagval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = lag.lagval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = lag.lagval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_laggrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = lag.laggrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = lag.laggrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_laggrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = lag.laggrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = lag.laggrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral(TestCase):
def test_lagint(self):
# check exceptions
assert_raises(ValueError, lag.lagint, [0], .5)
assert_raises(ValueError, lag.lagint, [0], -1)
assert_raises(ValueError, lag.lagint, [0], 1, [0, 0])
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = lag.lagint([0], m=i, k=k)
assert_almost_equal(res, [1, -1])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
lagpol = lag.poly2lag(pol)
lagint = lag.lagint(lagpol, m=1, k=[i])
res = lag.lag2poly(lagint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
lagpol = lag.poly2lag(pol)
lagint = lag.lagint(lagpol, m=1, k=[i], lbnd=-1)
assert_almost_equal(lag.lagval(-1, lagint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
lagpol = lag.poly2lag(pol)
lagint = lag.lagint(lagpol, m=1, k=[i], scl=2)
res = lag.lag2poly(lagint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = lag.lagint(tgt, m=1)
res = lag.lagint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = lag.lagint(tgt, m=1, k=[k])
res = lag.lagint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = lag.lagint(tgt, m=1, k=[k], lbnd=-1)
res = lag.lagint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = lag.lagint(tgt, m=1, k=[k], scl=2)
res = lag.lagint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_lagint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([lag.lagint(c) for c in c2d.T]).T
res = lag.lagint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([lag.lagint(c) for c in c2d])
res = lag.lagint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([lag.lagint(c, k=3) for c in c2d])
res = lag.lagint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative(TestCase):
def test_lagder(self):
# check exceptions
assert_raises(ValueError, lag.lagder, [0], .5)
assert_raises(ValueError, lag.lagder, [0], -1)
# check that zeroth derivative does nothing
for i in range(5):
tgt = [0]*i + [1]
res = lag.lagder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = lag.lagder(lag.lagint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = lag.lagder(lag.lagint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_lagder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([lag.lagder(c) for c in c2d.T]).T
res = lag.lagder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([lag.lagder(c) for c in c2d])
res = lag.lagder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(TestCase):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
def test_lagvander(self):
# check for 1d x
x = np.arange(3)
v = lag.lagvander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], lag.lagval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = lag.lagvander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], lag.lagval(x, coef))
def test_lagvander2d(self):
# also tests lagval2d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = lag.lagvander2d(x1, x2, [1, 2])
tgt = lag.lagval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = lag.lagvander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_lagvander3d(self):
# also tests lagval3d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = lag.lagvander3d(x1, x2, x3, [1, 2, 3])
tgt = lag.lagval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = lag.lagvander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
class TestFitting(TestCase):
def test_lagfit(self):
def f(x):
return x*(x - 1)*(x - 2)
# Test exceptions
assert_raises(ValueError, lag.lagfit, [1], [1], -1)
assert_raises(TypeError, lag.lagfit, [[1]], [1], 0)
assert_raises(TypeError, lag.lagfit, [], [1], 0)
assert_raises(TypeError, lag.lagfit, [1], [[[1]]], 0)
assert_raises(TypeError, lag.lagfit, [1, 2], [1], 0)
assert_raises(TypeError, lag.lagfit, [1], [1, 2], 0)
assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, lag.lagfit, [1], [1], 0, w=[1, 1])
assert_raises(ValueError, lag.lagfit, [1], [1], [-1,])
assert_raises(ValueError, lag.lagfit, [1], [1], [2, -1, 6])
assert_raises(TypeError, lag.lagfit, [1], [1], [])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = lag.lagfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(lag.lagval(x, coef3), y)
coef3 = lag.lagfit(x, y, [0, 1, 2, 3])
assert_equal(len(coef3), 4)
assert_almost_equal(lag.lagval(x, coef3), y)
#
coef4 = lag.lagfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(lag.lagval(x, coef4), y)
coef4 = lag.lagfit(x, y, [0, 1, 2, 3, 4])
assert_equal(len(coef4), 5)
assert_almost_equal(lag.lagval(x, coef4), y)
#
coef2d = lag.lagfit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
coef2d = lag.lagfit(x, np.array([y, y]).T, [0, 1, 2, 3])
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
y[0::2] = 0
wcoef3 = lag.lagfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
wcoef3 = lag.lagfit(x, yw, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
wcoef2d = lag.lagfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
# test scaling with complex x values whose squares sum to zero.
x = [1, 1j, -1, -1j]
assert_almost_equal(lag.lagfit(x, x, 1), [1, -1])
assert_almost_equal(lag.lagfit(x, x, [0, 1]), [1, -1])
class TestCompanion(TestCase):
def test_raises(self):
assert_raises(ValueError, lag.lagcompanion, [])
assert_raises(ValueError, lag.lagcompanion, [1])
def test_dimensions(self):
for i in range(1, 5):
coef = [0]*i + [1]
assert_(lag.lagcompanion(coef).shape == (i, i))
def test_linear_root(self):
assert_(lag.lagcompanion([1, 2])[0, 0] == 1.5)
class TestGauss(TestCase):
def test_100(self):
x, w = lag.laggauss(100)
# test orthogonality. Note that the results need to be normalized,
# otherwise the huge values that can arise from fast growing
# functions like Laguerre can be very confusing.
v = lag.lagvander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1/np.sqrt(vv.diagonal())
vv = vd[:, None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
tgt = 1.0
assert_almost_equal(w.sum(), tgt)
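# Background note (added): laggauss returns the nodes and weights of
# Gauss-Laguerre quadrature for the weight exp(-x) on [0, inf). Because the
# standard Laguerre polynomials are orthonormal under that weight,
#   sum_k w_k * L_i(x_k) * L_j(x_k) = delta_ij for i, j <= 99,
# which is what the normalized vv matrix checks, and w.sum() approximates
# integral exp(-x) dx = 1, which is the second assertion.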
class TestMisc(TestCase):
def test_lagfromroots(self):
res = lag.lagfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
pol = lag.lagfromroots(roots)
res = lag.lagval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(lag.lag2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_lagroots(self):
assert_almost_equal(lag.lagroots([1]), [])
assert_almost_equal(lag.lagroots([0, 1]), [1])
for i in range(2, 5):
tgt = np.linspace(0, 3, i)
res = lag.lagroots(lag.lagfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_lagtrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, lag.lagtrim, coef, -1)
# Test results
assert_equal(lag.lagtrim(coef), coef[:-1])
assert_equal(lag.lagtrim(coef, 1), coef[:-3])
assert_equal(lag.lagtrim(coef, 2), [0])
def test_lagline(self):
assert_equal(lag.lagline(3, 4), [7, -4])
def test_lag2poly(self):
for i in range(7):
assert_almost_equal(lag.lag2poly([0]*i + [1]), Llist[i])
def test_poly2lag(self):
for i in range(7):
assert_almost_equal(lag.poly2lag(Llist[i]), [0]*i + [1])
def test_weight(self):
x = np.linspace(0, 10, 11)
tgt = np.exp(-x)
res = lag.lagweight(x)
assert_almost_equal(res, tgt)
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 | -4,871,106,777,705,268,000 | 31.593573 | 74 | 0.50377 | false |
alisaifee/limits | limits/_version.py | 1 | 18450 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
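# Note (added for clarity): in a git-archive tarball the "$Format:...$"
# placeholders above are expanded by git itself (via an export-subst entry in
# .gitattributes), e.g. git_refnames becomes something like
# " (HEAD -> master, tag: 1.3)". In an ordinary checkout they stay
# unexpanded, which git_versions_from_keywords() detects further below.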
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "limits-"
cfg.versionfile_source = "limits/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
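# Usage sketch (added comment): the decorator simply records a function in the
# nested HANDLERS dict, e.g.
#
#   @register_vcs_handler("git", "pieces_from_vcs")
#   def git_pieces_from_vcs(...): ...
#
# leaves HANDLERS["git"]["pieces_from_vcs"] pointing at that function.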
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
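# Example of the returned pieces (illustrative values): a describe output of
# "1.3-4-gabc1234-dirty" would produce roughly
#   {"closest-tag": "1.3", "distance": 4, "short": "abc1234", "dirty": True,
#    "long": "<full sha>", "date": "<ISO-8601-ish string>", "error": None}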
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
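# Rendering example (illustrative): for the pieces sketched above,
# render_pep440 yields "1.3+4.gabc1234.dirty"; a clean build exactly on the
# tag yields just "1.3"; an untagged checkout yields
# "0+untagged.<distance>.g<short>".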
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
| mit | -5,901,818,309,689,888,000 | 34.480769 | 79 | 0.575176 | false |
jgonthier/psi4 | tests/pytests/test_qcvars.py | 10 | 8520 | import copy
import pytest
from .utils import *
import numpy as np
import psi4
pytestmark = pytest.mark.quick
_vars_entered = {
'VAR A': 4.0,
'VaR B': -4.0,
'MATVAR A': psi4.core.Matrix.from_array(np.arange(6).reshape(2, 3)),
'MatvaR B': psi4.core.Matrix.from_array(np.arange(3).reshape(1, 3)),
'NPVAR A': np.arange(8).reshape(2, 4),
'NpvaR B': np.arange(4).reshape(1, 4),
}
_vars_stored = {
'VAR A': 4.0,
'VAR B': -4.0,
'MATVAR A': psi4.core.Matrix.from_array(np.arange(6).reshape(2, 3)),
'MATVAR B': psi4.core.Matrix.from_array(np.arange(3).reshape(1, 3)),
'NPVAR A': psi4.core.Matrix.from_array(np.arange(8).reshape(2, 4)),
'NPVAR B': psi4.core.Matrix.from_array(np.arange(4).reshape(1, 4)),
}
@pytest.fixture
def pe_wfn_qcvars():
psi4.core.clean_variables()
he = psi4.geometry('He')
wfn = psi4.core.Wavefunction.build(he, 'cc-pvdz')
for pv, pvv in _vars_entered.items():
psi4.core.set_variable(pv, pvv)
wfn.set_variable(pv, pvv)
return wfn
# can't use compare_dicts with symmetry psi4.Matrix
def _compare_qcvars(ref, expected, decimal, label):
assert set(ref.keys()) == set(expected.keys())
for k, v in ref.items():
if isinstance(v, psi4.core.Matrix):
assert compare_matrices(v, expected[k], decimal, label)
else:
assert compare_values(v, expected[k], decimal, label)
@pytest.mark.parametrize("mode", [
("globals"),
("wfn"),
])
def test_variables(mode, pe_wfn_qcvars):
obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
subject = obj.variables()
_compare_qcvars(_vars_stored, subject, 8, '')
obj.set_variable('npvar A', np.zeros(3).reshape(1, 3))
_compare_qcvars(_vars_stored, subject, 8, '')
@pytest.mark.parametrize("mode", [
("globals"),
("wfn"),
])
def test_set_variable_overwrite(mode, pe_wfn_qcvars):
obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
# fine to overwrite keys
key = 'var D'
val = 3.3
val2 = 4.4
obj.set_variable(key, val)
assert compare_values(val, obj.variable(key), 8, tnm())
obj.set_variable(key, val2)
assert compare_values(val2, obj.variable(key), 8, tnm())
# fine to overwrite array keys
key = 'matvar D'
mat = psi4.core.Matrix.from_array(np.arange(4).reshape(2, 2))
mat2 = psi4.core.Matrix.from_array(np.arange(6).reshape(3, 2))
obj.set_variable(key, mat)
assert compare_matrices(mat, obj.variable(key), 8, tnm())
obj.set_variable(key, mat2)
assert compare_matrices(mat2, obj.variable(key), 8, tnm())
# not fine to shadow keys with both types
with pytest.raises(psi4.ValidationError) as err:
obj.set_variable('vAr D', mat)
assert 'already a scalar variable' in str(err.value)
with pytest.raises(psi4.ValidationError) as err:
obj.set_variable('matvAr D', val)
assert 'already an array variable' in str(err.value)
@pytest.mark.parametrize("mode", [
("globals"),
("wfn"),
])
def test_variable_none(mode, pe_wfn_qcvars):
obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
with pytest.raises(KeyError):
obj.variable('var f')
@pytest.mark.parametrize("mode,key", [
pytest.param('globals', 'vAR B', id='globals scal'),
pytest.param('globals', 'MatvAR B', id='globals mat'),
pytest.param('globals', 'NpvAR B', id='globals np'),
pytest.param('wfn', 'vAR B', id='wfn scal'),
pytest.param('wfn', 'MatvAR B', id='wfn mat'),
pytest.param('wfn', 'NpvAR B', id='wfn np'),
])
def test_variable(mode, key, pe_wfn_qcvars, request):
obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
if 'scal' in request.node.name:
compare = compare_values
else:
compare = compare_matrices
assert compare(_vars_stored[key.upper()], obj.variable(key), 8, tnm())
@pytest.mark.parametrize("mode", [
("globals"),
("wfn"),
])
def test_variable_mem_scal(mode, pe_wfn_qcvars):
obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
key = 'VaR C'
ref = 3.3
val = 3.3
obj.set_variable(key, val)
assert compare_values(ref, val, 8, tnm())
assert compare_values(ref, obj.variable(key), 8, tnm())
val *= 2
assert compare_values(ref, obj.variable(key), 8, tnm())
accessed = obj.variable(key)
accessed *= 3
assert compare_values(ref, obj.variable(key), 8, tnm())
@pytest.mark.parametrize("mode", [
("globals"),
("wfn"),
])
def test_variable_mem_mat(mode, pe_wfn_qcvars):
obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
key = 'MaTvAr C'
ref = psi4.core.Matrix.from_array(np.arange(4).reshape(2, 2))
val = psi4.core.Matrix.from_array(np.arange(4).reshape(2, 2))
obj.set_variable(key, val)
assert compare_matrices(ref, val, 8, tnm())
assert compare_matrices(ref, obj.variable(key), 8, tnm())
val.scale(2)
assert compare_matrices(ref, obj.variable(key), 8, tnm())
accessed = obj.variable(key)
accessed.scale(3)
assert compare_matrices(ref, obj.variable(key), 8, tnm())
@pytest.mark.parametrize("mode", [
("globals"),
("wfn"),
])
def test_variable_mem_np(mode, pe_wfn_qcvars):
obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
key = 'npVaR C'
ref = np.arange(4).reshape(2, 2)
val = np.arange(4).reshape(2, 2)
obj.set_variable(key, val)
assert compare_arrays(ref, val, 8, tnm())
ref = psi4.core.Matrix.from_array(ref)
assert compare_matrices(ref, obj.variable(key), 8, tnm())
val *= 2
assert compare_matrices(ref, obj.variable(key), 8, tnm())
accessed = obj.variable(key)
accessed.scale(3)
assert compare_matrices(ref, obj.variable(key), 8, tnm())
@pytest.mark.parametrize("mode,tkey,fkey", [
pytest.param('globals', 'var A', 'var C', id='globals scal'),
pytest.param('globals', 'matvar A', 'var C', id='globals mat'),
pytest.param('globals', 'npvar A', 'var C', id='globals np'),
pytest.param('wfn', 'var A', 'var C', id='wfn scal'),
pytest.param('wfn', 'matvar A', 'var C', id='wfn mat'),
pytest.param('wfn', 'npvar A', 'var C', id='wfn np'),
])
def test_has_del_variable_scal(mode, tkey, fkey, pe_wfn_qcvars):
obj = {'globals': psi4.core, 'wfn': pe_wfn_qcvars}[mode]
assert obj.has_variable(tkey)
assert not obj.has_variable(fkey)
obj.del_variable(tkey)
assert not obj.has_variable(tkey)
obj.del_variable(fkey)
# <<< TODO Deprecated! Delete in Psi4 v1.4 >>>
def test_deprecated_core_get_variable(pe_wfn_qcvars):
with pytest.warns(FutureWarning) as err:
subject = psi4.core.get_variable('vAR B')
assert compare_values(_vars_stored['VAR B'], subject, 8, tnm())
def test_deprecated_core_get_variables(pe_wfn_qcvars):
with pytest.warns(FutureWarning) as err:
subject = psi4.core.get_variables()
scals = {k: v for k, v in _vars_stored.items() if k.startswith('VAR ')}
_compare_qcvars(scals, subject, 8, tnm())
def test_deprecated_core_get_array_variable(pe_wfn_qcvars):
with pytest.warns(FutureWarning) as err:
subject = psi4.core.get_array_variable('MatvAR B')
assert compare_matrices(_vars_stored['MATVAR B'], subject, 8, tnm())
def test_deprecated_core_get_array_variables(pe_wfn_qcvars):
with pytest.warns(FutureWarning) as err:
subject = psi4.core.get_array_variables()
arrs = {k: v for k, v in _vars_stored.items() if not k.startswith('VAR ')}
_compare_qcvars(arrs, subject, 8, tnm())
def test_deprecated_wfn_get_variable(pe_wfn_qcvars):
with pytest.warns(FutureWarning) as err:
subject = pe_wfn_qcvars.get_variable('vAR B')
assert compare_values(_vars_stored['VAR B'], subject, 8, tnm())
def test_deprecated_wfn_get_array(pe_wfn_qcvars):
with pytest.warns(FutureWarning) as err:
subject = pe_wfn_qcvars.get_array('MatvAR B')
assert compare_matrices(_vars_stored['MATVAR B'], subject, 8, tnm())
def test_deprecated_wfn_set_array(pe_wfn_qcvars):
mat = psi4.core.Matrix.from_array(np.arange(4).reshape(2, 2))
with pytest.warns(FutureWarning) as err:
pe_wfn_qcvars.set_array('matvar D', mat)
assert compare_matrices(mat, pe_wfn_qcvars.variable('MATvar D'), 8, tnm())
def test_deprecated_wfn_arrays(pe_wfn_qcvars):
with pytest.warns(FutureWarning) as err:
subject = pe_wfn_qcvars.arrays()
arrs = {k: v for k, v in _vars_stored.items() if not k.startswith('VAR ')}
_compare_qcvars(arrs, subject, 8, tnm())
| lgpl-3.0 | 7,590,267,403,531,396,000 | 29.106007 | 78 | 0.635211 | false |
vktr/CouchPotatoServer | couchpotato/core/media/movie/providers/automation/crowdai.py | 12 | 3025 | import re
from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt, splitString
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.automation.base import Automation
log = CPLog(__name__)
autoload = 'CrowdAI'
class CrowdAI(Automation, RSS):
interval = 1800
def getIMDBids(self):
movies = []
urls = dict(zip(splitString(self.conf('automation_urls')), [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]))
for url in urls:
if not urls[url]:
continue
rss_movies = self.getRSSData(url)
for movie in rss_movies:
description = self.getTextElement(movie, 'description')
grabs = 0
for item in movie:
if item.attrib.get('name') == 'grabs':
grabs = item.attrib.get('value')
break
if int(grabs) > tryInt(self.conf('number_grabs')):
title = re.match(r'.*Title: .a href.*/">(.*) \(\d{4}\).*', description).group(1)
log.info2('%s grabs for movie: %s, enqueue...', (grabs, title))
year = re.match(r'.*Year: (\d{4}).*', description).group(1)
imdb = self.search(title, year)
if imdb and self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
return movies
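# Note (added, not in the original source): the regexes above assume each feed
# item description looks roughly like
#   'Title: <a href="...">Some Movie (2012)</a> ... Year: 2012 ...'
# and that the newznab attribute named 'grabs' carries the download count
# compared against the configured threshold.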
config = [{
'name': 'crowdai',
'groups': [
{
'tab': 'automation',
'list': 'automation_providers',
'name': 'crowdai_automation',
'label': 'CrowdAI',
'description': 'Imports from any newznab powered NZB providers RSS feed depending on the number of grabs per movie. Go to your newznab site and find the RSS section. Then copy the copy paste the link under "Movies > x264 feed" here.',
'options': [
{
'name': 'automation_enabled',
'default': False,
'type': 'enabler',
},
{
'name': 'automation_urls_use',
'label': 'Use',
'default': '1',
},
{
'name': 'automation_urls',
'label': 'url',
'type': 'combined',
'combine': ['automation_urls_use', 'automation_urls'],
'default': 'http://YOUR_PROVIDER/rss?t=THE_MOVIE_CATEGORY&i=YOUR_USER_ID&r=YOUR_API_KEY&res=2&rls=2&num=100',
},
{
'name': 'number_grabs',
'default': '500',
'label': 'Grab threshold',
'description': 'Number of grabs required',
},
],
},
],
}]
| gpl-3.0 | 2,018,766,652,589,445,400 | 31.988764 | 246 | 0.466446 | false |
openmips/stbgui | lib/python/Screens/TimeDateInput.py | 25 | 2498 | from Screen import Screen
from Components.config import ConfigClock, ConfigDateTime, getConfigListEntry
from Components.ActionMap import NumberActionMap
from Components.ConfigList import ConfigListScreen
from Components.Label import Label
from Components.Pixmap import Pixmap
import time
import datetime
class TimeDateInput(Screen, ConfigListScreen):
def __init__(self, session, config_time=None, config_date=None):
Screen.__init__(self, session)
self.setTitle(_("Date/time input"))
self["oktext"] = Label(_("OK"))
self["canceltext"] = Label(_("Cancel"))
self["ok"] = Pixmap()
self["cancel"] = Pixmap()
self.createConfig(config_date, config_time)
self["actions"] = NumberActionMap(["SetupActions"],
{
"ok": self.keySelect,
"save": self.keyGo,
"cancel": self.keyCancel,
}, -2)
self.list = []
ConfigListScreen.__init__(self, self.list)
self.createSetup(self["config"])
def createConfig(self, conf_date, conf_time):
self.save_mask = 0
if conf_time:
self.save_mask |= 1
else:
conf_time = ConfigClock(default = time.time())
if conf_date:
self.save_mask |= 2
else:
conf_date = ConfigDateTime(default = time.time(), formatstring = _("%d.%B %Y"), increment = 86400)
self.timeinput_date = conf_date
self.timeinput_time = conf_time
def createSetup(self, configlist):
self.list = [
getConfigListEntry(_("Date"), self.timeinput_date),
getConfigListEntry(_("Time"), self.timeinput_time)
]
configlist.list = self.list
configlist.l.setList(self.list)
def keyPageDown(self):
sel = self["config"].getCurrent()
if sel and sel[1] == self.timeinput_time:
self.timeinput_time.decrement()
self["config"].invalidateCurrent()
def keyPageUp(self):
sel = self["config"].getCurrent()
if sel and sel[1] == self.timeinput_time:
self.timeinput_time.increment()
self["config"].invalidateCurrent()
def keySelect(self):
self.keyGo()
def getTimestamp(self, date, mytime):
d = time.localtime(date)
dt = datetime.datetime(d.tm_year, d.tm_mon, d.tm_mday, mytime[0], mytime[1])
return int(time.mktime(dt.timetuple()))
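# Example (illustrative): getTimestamp(date_epoch, (18, 30)) keeps the
# year/month/day part of date_epoch, replaces the clock time with 18:30 and
# returns the result as a Unix timestamp via time.mktime().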
def keyGo(self):
time = self.getTimestamp(self.timeinput_date.value, self.timeinput_time.value)
if self.save_mask & 1:
self.timeinput_time.save()
if self.save_mask & 2:
self.timeinput_date.save()
self.close((True, time))
def keyCancel(self):
if self.save_mask & 1:
self.timeinput_time.cancel()
if self.save_mask & 2:
self.timeinput_date.cancel()
self.close((False,))
| gpl-2.0 | 2,164,552,874,550,766,800 | 28.046512 | 101 | 0.696157 | false |
seaotterman/tensorflow | tensorflow/contrib/learn/python/learn/metric_spec_test.py | 136 | 14891 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MetricSpec."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
# pylint: disable=g-bad-todo,g-import-not-at-top
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.python.platform import test
class MetricSpecTest(test.TestCase):
def test_named_args_with_weights(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels_ = {"l1": "l1_value", "l2": "l2_value"}
predictions_ = {"p1": "p1_value", "p2": "p2_value"}
def _fn0(predictions, labels, weights=None):
self.assertEqual("p1_value", predictions)
self.assertEqual("l1_value", labels)
self.assertEqual("f2_value", weights)
return "metric_fn_result"
def _fn1(predictions, targets, weights=None):
self.assertEqual("p1_value", predictions)
self.assertEqual("l1_value", targets)
self.assertEqual("f2_value", weights)
return "metric_fn_result"
def _fn2(prediction, label, weight=None):
self.assertEqual("p1_value", prediction)
self.assertEqual("l1_value", label)
self.assertEqual("f2_value", weight)
return "metric_fn_result"
def _fn3(prediction, target, weight=None):
self.assertEqual("p1_value", prediction)
self.assertEqual("l1_value", target)
self.assertEqual("f2_value", weight)
return "metric_fn_result"
for fn in (_fn0, _fn1, _fn2, _fn3):
spec = MetricSpec(
metric_fn=fn, prediction_key="p1", label_key="l1", weight_key="f2")
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_no_args(self):
def _fn():
self.fail("Expected failure before metric_fn.")
spec = MetricSpec(metric_fn=_fn)
with self.assertRaises(TypeError):
spec.create_metric_ops(
{"f1": "f1_value"}, "labels_value", "predictions_value")
def test_kwargs(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(**kwargs):
self.assertEqual({}, kwargs)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
with self.assertRaises(TypeError):
spec.create_metric_ops(features, labels_, predictions_)
def test_named_labels_no_predictions(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(labels):
self.assertEqual(labels_, labels)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
with self.assertRaises(TypeError):
spec.create_metric_ops(features, labels_, predictions_)
def test_named_labels_no_predictions_with_kwargs(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(labels, **kwargs):
self.assertEqual(labels_, labels)
self.assertEqual({}, kwargs)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
with self.assertRaises(TypeError):
spec.create_metric_ops(features, labels_, predictions_)
def test_no_named_predictions_named_labels_first_arg(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(labels, predictions_by_another_name):
self.assertEqual(predictions_, predictions_by_another_name)
self.assertEqual(labels_, labels)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_no_named_predictions_named_labels_second_arg(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(predictions_by_another_name, labels):
self.assertEqual(predictions_, predictions_by_another_name)
self.assertEqual(labels_, labels)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_no_named_labels(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(predictions):
self.assertEqual(predictions_, predictions)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_no_named_labels_or_predictions_1arg(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(a):
self.assertEqual(predictions_, a)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn)
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_no_named_labels_or_predictions_2args(self):
features = {"f1": "f1_value"}
labels_ = "labels_value"
predictions_ = "predictions_value"
def _fn(a, b):
del a, b
self.fail("Expected failure before metric_fn.")
spec = MetricSpec(metric_fn=_fn)
with self.assertRaises(TypeError):
spec.create_metric_ops(features, labels_, predictions_)
def test_named_args_no_weights(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels_ = {"l1": "l1_value", "l2": "l2_value"}
predictions_ = {"p1": "p1_value", "p2": "p2_value"}
def _fn0(predictions, labels):
self.assertEqual("p1_value", predictions)
self.assertEqual("l1_value", labels)
return "metric_fn_result"
def _fn1(predictions, targets):
self.assertEqual("p1_value", predictions)
self.assertEqual("l1_value", targets)
return "metric_fn_result"
def _fn2(prediction, label):
self.assertEqual("p1_value", prediction)
self.assertEqual("l1_value", label)
return "metric_fn_result"
def _fn3(prediction, target):
self.assertEqual("p1_value", prediction)
self.assertEqual("l1_value", target)
return "metric_fn_result"
for fn in (_fn0, _fn1, _fn2, _fn3):
spec = MetricSpec(metric_fn=fn, prediction_key="p1", label_key="l1")
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_predictions_dict_no_key(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels = {"l1": "l1_value", "l2": "l2_value"}
predictions = {"p1": "p1_value", "p2": "p2_value"}
def _fn(predictions, labels, weights=None):
del labels, predictions, weights
self.fail("Expected failure before metric_fn.")
spec = MetricSpec(metric_fn=_fn, label_key="l1", weight_key="f2")
with self.assertRaisesRegexp(
ValueError,
"MetricSpec without specified prediction_key requires predictions"
" tensor or single element dict"):
spec.create_metric_ops(features, labels, predictions)
def test_labels_dict_no_key(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels = {"l1": "l1_value", "l2": "l2_value"}
predictions = {"p1": "p1_value", "p2": "p2_value"}
def _fn(labels, predictions, weights=None):
del labels, predictions, weights
self.fail("Expected failure before metric_fn.")
spec = MetricSpec(metric_fn=_fn, prediction_key="p1", weight_key="f2")
with self.assertRaisesRegexp(
ValueError,
"MetricSpec without specified label_key requires labels tensor or"
" single element dict"):
spec.create_metric_ops(features, labels, predictions)
def test_single_prediction(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels_ = {"l1": "l1_value", "l2": "l2_value"}
predictions_ = "p1_value"
def _fn(predictions, labels, weights=None):
self.assertEqual(predictions_, predictions)
self.assertEqual("l1_value", labels)
self.assertEqual("f2_value", weights)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn, label_key="l1", weight_key="f2")
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_single_label(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels_ = "l1_value"
predictions_ = {"p1": "p1_value", "p2": "p2_value"}
def _fn(predictions, labels, weights=None):
self.assertEqual("p1_value", predictions)
self.assertEqual(labels_, labels)
self.assertEqual("f2_value", weights)
return "metric_fn_result"
spec = MetricSpec(metric_fn=_fn, prediction_key="p1", weight_key="f2")
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels_, predictions_))
def test_single_predictions_with_key(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels = {"l1": "l1_value", "l2": "l2_value"}
predictions = "p1_value"
def _fn(predictions, labels, weights=None):
del labels, predictions, weights
self.fail("Expected failure before metric_fn.")
spec = MetricSpec(
metric_fn=_fn, prediction_key="p1", label_key="l1", weight_key="f2")
with self.assertRaisesRegexp(
ValueError,
"MetricSpec with prediction_key specified requires predictions dict"):
spec.create_metric_ops(features, labels, predictions)
def test_single_labels_with_key(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels = "l1_value"
predictions = {"p1": "p1_value", "p2": "p2_value"}
def _fn(predictions, labels, weights=None):
del labels, predictions, weights
self.fail("Expected failure before metric_fn.")
spec = MetricSpec(
metric_fn=_fn, prediction_key="p1", label_key="l1", weight_key="f2")
with self.assertRaisesRegexp(
ValueError, "MetricSpec with label_key specified requires labels dict"):
spec.create_metric_ops(features, labels, predictions)
def test_str(self):
def _metric_fn(labels, predictions, weights=None):
return predictions, labels, weights
string = str(MetricSpec(
metric_fn=_metric_fn,
label_key="my_label",
prediction_key="my_prediction",
weight_key="my_weight"))
self.assertIn("_metric_fn", string)
self.assertIn("my_label", string)
self.assertIn("my_prediction", string)
self.assertIn("my_weight", string)
def test_partial_str(self):
def custom_metric(predictions, labels, stuff, weights=None):
return predictions, labels, weights, stuff
string = str(MetricSpec(
metric_fn=functools.partial(custom_metric, stuff=5),
label_key="my_label",
prediction_key="my_prediction",
weight_key="my_weight"))
self.assertIn("custom_metric", string)
self.assertIn("my_label", string)
self.assertIn("my_prediction", string)
self.assertIn("my_weight", string)
def test_partial(self):
features = {"f1": "f1_value", "f2": "f2_value"}
labels = {"l1": "l1_value"}
predictions = {"p1": "p1_value", "p2": "p2_value"}
def custom_metric(predictions, labels, stuff, weights=None):
self.assertEqual("p1_value", predictions)
self.assertEqual("l1_value", labels)
self.assertEqual("f2_value", weights)
if stuff:
return "metric_fn_result"
raise ValueError("No stuff.")
spec = MetricSpec(
metric_fn=functools.partial(custom_metric, stuff=5),
label_key="l1",
prediction_key="p1",
weight_key="f2")
self.assertEqual(
"metric_fn_result",
spec.create_metric_ops(features, labels, predictions))
spec = MetricSpec(
metric_fn=functools.partial(custom_metric, stuff=None),
prediction_key="p1", label_key="l1", weight_key="f2")
with self.assertRaisesRegexp(ValueError, "No stuff."):
spec.create_metric_ops(features, labels, predictions)
def test_label_key_without_label_arg(self):
def _fn0(predictions, weights=None):
del predictions, weights
self.fail("Expected failure before metric_fn.")
def _fn1(prediction, weight=None):
del prediction, weight
self.fail("Expected failure before metric_fn.")
for fn in (_fn0, _fn1):
with self.assertRaisesRegexp(ValueError, "label.*missing"):
MetricSpec(metric_fn=fn, label_key="l1")
def test_weight_key_without_weight_arg(self):
def _fn0(predictions, labels):
del predictions, labels
self.fail("Expected failure before metric_fn.")
def _fn1(prediction, label):
del prediction, label
self.fail("Expected failure before metric_fn.")
def _fn2(predictions, targets):
del predictions, targets
self.fail("Expected failure before metric_fn.")
def _fn3(prediction, target):
del prediction, target
self.fail("Expected failure before metric_fn.")
for fn in (_fn0, _fn1, _fn2, _fn3):
with self.assertRaisesRegexp(ValueError, "weight.*missing"):
MetricSpec(metric_fn=fn, weight_key="f2")
def test_multiple_label_args(self):
def _fn0(predictions, labels, targets):
del predictions, labels, targets
self.fail("Expected failure before metric_fn.")
def _fn1(prediction, label, target):
del prediction, label, target
self.fail("Expected failure before metric_fn.")
for fn in (_fn0, _fn1):
with self.assertRaisesRegexp(ValueError, "provide only one of.*label"):
MetricSpec(metric_fn=fn)
def test_multiple_prediction_args(self):
def _fn(predictions, prediction, labels):
del predictions, prediction, labels
self.fail("Expected failure before metric_fn.")
with self.assertRaisesRegexp(ValueError, "provide only one of.*prediction"):
MetricSpec(metric_fn=_fn)
def test_multiple_weight_args(self):
def _fn(predictions, labels, weights=None, weight=None):
del predictions, labels, weights, weight
self.fail("Expected failure before metric_fn.")
with self.assertRaisesRegexp(ValueError, "provide only one of.*weight"):
MetricSpec(metric_fn=_fn)
if __name__ == "__main__":
test.main()
| apache-2.0 | -6,618,708,374,674,634,000 | 33.3903 | 80 | 0.64858 | false |
KiChjang/servo | tests/wpt/web-platform-tests/tools/third_party/pywebsocket3/test/testdata/handlers/origin_check_wsh.py | 21 | 1934 | # Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def web_socket_do_extra_handshake(request):
if request.ws_origin == 'http://example.com':
return
raise ValueError('Unacceptable origin: %r' % request.ws_origin)
def web_socket_transfer_data(request):
message = 'origin_check_wsh.py is called for %s, %s' % (
request.ws_resource, request.ws_protocol)
request.connection.write(message.encode('UTF-8'))
# vi:sts=4 sw=4 et
| mpl-2.0 | 3,945,826,400,840,328,700 | 43.976744 | 72 | 0.759049 | false |
lgp171188/cookiecutter | tests/test_generate_copy_without_render.py | 25 | 2238 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_generate_copy_without_render
---------------------------------
"""
from __future__ import unicode_literals
import os
import pytest
from cookiecutter import generate
from cookiecutter import utils
@pytest.fixture(scope='function')
def remove_test_dir(request):
"""
Remove the folder that is created by the test.
"""
def fin_remove_test_dir():
if os.path.exists('test_copy_without_render'):
utils.rmtree('test_copy_without_render')
request.addfinalizer(fin_remove_test_dir)
@pytest.mark.usefixtures('clean_system', 'remove_test_dir')
def test_generate_copy_without_render_extensions():
generate.generate_files(
context={
'cookiecutter': {
'repo_name': 'test_copy_without_render',
'render_test': 'I have been rendered!',
'_copy_without_render': [
'*not-rendered',
'rendered/not_rendered.yml',
'*.txt',
]}
},
repo_dir='tests/test-generate-copy-without-render'
)
dir_contents = os.listdir('test_copy_without_render')
assert '{{cookiecutter.repo_name}}-not-rendered' in dir_contents
assert 'test_copy_without_render-rendered' in dir_contents
with open('test_copy_without_render/README.txt') as f:
assert '{{cookiecutter.render_test}}' in f.read()
with open('test_copy_without_render/README.rst') as f:
assert 'I have been rendered!' in f.read()
with open('test_copy_without_render/'
'test_copy_without_render-rendered/'
'README.txt') as f:
assert '{{cookiecutter.render_test}}' in f.read()
with open('test_copy_without_render/'
'test_copy_without_render-rendered/'
'README.rst') as f:
assert 'I have been rendered' in f.read()
with open('test_copy_without_render/'
'{{cookiecutter.repo_name}}-not-rendered/'
'README.rst') as f:
assert '{{cookiecutter.render_test}}' in f.read()
with open('test_copy_without_render/rendered/not_rendered.yml') as f:
assert '{{cookiecutter.render_test}}' in f.read()
| bsd-3-clause | 7,118,681,708,085,399,000 | 30.521127 | 73 | 0.595621 | false |
h00dy/Diamond | src/collectors/slony/test/testslony.py | 23 | 3624 | #!/usr/bin/python
# coding=utf-8
###############################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import run_only
from mock import patch
from slony import SlonyCollector
def run_only_if_psycopg2_is_available(func):
try:
import psycopg2
except ImportError:
psycopg2 = None
pred = lambda: psycopg2 is not None
return run_only(func, pred)
class TestSlonyCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('SlonyCollector', {})
self.collector = SlonyCollector(config, None)
def test_import(self):
self.assertTrue(SlonyCollector)
@run_only_if_psycopg2_is_available
@patch.object(SlonyCollector, '_get_stats_by_database')
@patch.object(SlonyCollector, 'publish')
def test_default(self, publish, _get_stats_by_database):
_get_stats_by_database.return_value = [('foo', 7)]
self.collector.collect()
_get_stats_by_database.assert_called_with(
'localhost',
5432,
'postgres',
'postgres',
'postgres',
'_postgres',
'Node [0-9]+ - postgres@localhost',
)
self.assertPublished(publish, 'foo', 7)
@run_only_if_psycopg2_is_available
@patch.object(SlonyCollector, '_get_stats_by_database')
@patch.object(SlonyCollector, 'publish')
def test_instances(self, publish, _get_stats_by_database):
def side_effect(host, port, user, pwd, slony_db, slony_schema, node):
if (slony_db, slony_schema) == ('postgres', '_postgres'):
return [('foo', 7)]
elif (slony_db, slony_schema) == ('data', '_data'):
return [('bar', 14)]
_get_stats_by_database.side_effect = side_effect
config = get_collector_config('SlonyCollector', {
'instances': {
'alpha': {
'slony_db': 'postgres',
'slony_schema': '_postgres',
},
'beta': {
'slony_db': 'data',
'slony_schema': '_data',
},
}
})
collector = SlonyCollector(config, None)
collector.collect()
self.assertPublished(publish, 'foo', 7)
self.assertPublished(publish, 'bar', 14)
@run_only_if_psycopg2_is_available
@patch.object(SlonyCollector, '_get_stats_by_database')
def test_override_user_password_nodestr(self, _get_stats_by_database):
config = get_collector_config('SlonyCollector', {
'instances': {
'alpha': {
'slony_db': 'postgres',
'slony_schema': '_postgres',
'user': 'postgres',
'password': 'postgres',
'slony_node_string': '(.*)',
},
'beta': {
'slony_db': 'data',
'slony_schema': '_data',
'user': 'data',
'password': 'data',
'slony_node_string': 'Node (.*)',
},
}
})
collector = SlonyCollector(config, None)
collector.collect()
_get_stats_by_database.assert_any_call(
'localhost', 5432, 'postgres', 'postgres',
'postgres', '_postgres', '(.*)'
)
_get_stats_by_database.assert_any_call(
'localhost', 5432, 'data', 'data',
'data', '_data', 'Node (.*)'
)
| mit | -6,685,511,309,345,439,000 | 31.070796 | 79 | 0.51021 | false |
openstack/rally | tests/unit/test_resources.py | 1 | 1715 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import difflib
import os
import rally
from rally.cli import cliutils
from rally.utils import encodeutils
from tests.unit import test
RES_PATH = os.path.join(os.path.dirname(rally.__file__), os.pardir, "etc")
class BashCompletionTestCase(test.TestCase):
def test_bash_completion(self):
with open(os.path.join(RES_PATH, "rally.bash_completion"), "r") as f:
old = f.read().splitlines()
new = cliutils._generate_bash_completion_script().splitlines()
if old != new:
for line in difflib.unified_diff(old, new):
print(line)
new_filename = "/tmp/rally.bash.new"
with open(new_filename, "wb") as new_file:
new_file.write(encodeutils.safe_encode("\n".join(new)))
self.fail("bash completion script is outdated. "
"New script is located at %s "
"You may fix this by executing "
"`mv %s etc/rally.bash_completion`" % (new_filename,
new_filename))
| apache-2.0 | -2,202,229,556,121,345,300 | 39.833333 | 78 | 0.626822 | false |
mat650/metagoofil | discovery/googlesearch.py | 15 | 1388 | import string
import httplib, sys
import myparser
import re
import time
class search_google:
def __init__(self,word,limit,start,filetype):
self.word=word
self.results=""
self.totalresults=""
self.filetype=filetype
self.server="www.google.com"
self.hostname="www.google.com"
self.userAgent="(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6"
self.quantity="100"
self.limit=limit
self.counter=start
def do_search_files(self):
h = httplib.HTTP(self.server)
h.putrequest('GET', "/search?num="+self.quantity+"&start=" + str(self.counter) + "&hl=en&meta=&q=filetype:"+self.filetype+"%20site:" + self.word)
h.putheader('Host', self.hostname)
h.putheader('User-agent', self.userAgent)
h.endheaders()
returncode, returnmsg, headers = h.getreply()
self.results = h.getfile().read()
self.totalresults+= self.results
def get_emails(self):
rawres=myparser.parser(self.totalresults,self.word)
return rawres.emails()
def get_hostnames(self):
rawres=myparser.parser(self.totalresults,self.word)
return rawres.hostnames()
def get_files(self):
rawres=myparser.parser(self.totalresults,self.word)
return rawres.fileurls()
def process_files(self):
while self.counter < self.limit:
self.do_search_files()
time.sleep(1)
self.counter+=100
print "\tSearching "+ str(self.counter) + " results..."
| gpl-2.0 | -896,784,996,591,188,000 | 27.916667 | 147 | 0.711095 | false |
fceller/arangodb | 3rdParty/boost/1.62.0/libs/mpi/test/python/scatter_test.py | 64 | 1300 | # Copyright (C) 2006 Douglas Gregor <doug.gregor -at- gmail.com>.
# Use, modification and distribution is subject to the Boost Software
# License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test scatter() collective.
import boost.parallel.mpi as mpi
from generators import *
def scatter_test(comm, generator, kind, root):
if comm.rank == root:
print ("Scattering %s from root %d..." % (kind, root)),
if comm.rank == root:
values = list()
for p in range(0, comm.size):
values.append(generator(p))
result = mpi.scatter(comm, values, root = root)
else:
result = mpi.scatter(comm, root = root);
assert result == generator(comm.rank)
if comm.rank == root: print "OK."
return
scatter_test(mpi.world, int_generator, "integers", 0)
scatter_test(mpi.world, int_generator, "integers", 1)
scatter_test(mpi.world, gps_generator, "GPS positions", 0)
scatter_test(mpi.world, gps_generator, "GPS positions", 1)
scatter_test(mpi.world, string_generator, "strings", 0)
scatter_test(mpi.world, string_generator, "strings", 1)
scatter_test(mpi.world, string_list_generator, "list of strings", 0)
scatter_test(mpi.world, string_list_generator, "list of strings", 1)
| apache-2.0 | -2,034,549,639,160,177,200 | 35.111111 | 73 | 0.676923 | false |
drawks/ansible | test/units/module_utils/xenserver/test_xenserverobject.py | 14 | 2121 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2019, Bojan Vitnik <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from .FakeAnsibleModule import FakeAnsibleModule, ExitJsonException, FailJsonException
from .common import fake_xenapi_ref
def test_xenserverobject_xenapi_lib_detection(mocker, fake_ansible_module, xenserver):
"""Tests XenAPI lib detection code."""
mocker.patch('ansible.module_utils.xenserver.HAS_XENAPI', new=False)
with pytest.raises(FailJsonException) as exc_info:
xenserver.XenServerObject(fake_ansible_module)
assert exc_info.value.kwargs['msg'] == ("XenAPI Python library is required for this module! "
"Please download XenServer SDK and copy XenAPI.py to your Python site-packages. "
"Check Notes section in module documentation for more info.")
def test_xenserverobject_xenapi_failure(mock_xenapi_failure, fake_ansible_module, xenserver):
"""Tests catching of XenAPI failures."""
with pytest.raises(FailJsonException) as exc_info:
xenserver.XenServerObject(fake_ansible_module)
assert exc_info.value.kwargs['msg'] == "XAPI ERROR: %s" % mock_xenapi_failure[1]
def test_xenserverobject(mocker, fake_ansible_module, XenAPI, xenserver):
"""Tests successful creation of XenServerObject."""
mocked_xenapi = mocker.patch.object(XenAPI.Session, 'xenapi', create=True)
mocked_returns = {
"pool.get_all.return_value": [fake_xenapi_ref('pool')],
"pool.get_default_SR.return_value": fake_xenapi_ref('SR'),
"session.get_this_host.return_value": fake_xenapi_ref('host'),
"host.get_software_version.return_value": {"product_version": "7.2.0"},
}
mocked_xenapi.configure_mock(**mocked_returns)
xso = xenserver.XenServerObject(fake_ansible_module)
assert xso.pool_ref == fake_xenapi_ref('pool')
assert xso.xenserver_version == [7, 2, 0]
| gpl-3.0 | -4,819,046,169,347,684,000 | 39.788462 | 125 | 0.688355 | false |
ahachete/gpdb | gpMgmt/bin/gppylib/commands/test/regress/test_regress_pg.py | 54 | 1711 | #!/usr/bin/env python
#
# Copyright (c) Greenplum Inc 2008. All Rights Reserved.
#
# Unit Testing of pg commands
#
import os
import unittest
import tempfile
from gppylib.db import dbconn
from gppylib.db.test import skipIfDatabaseDown
from gppylib import gplog
from gppylib.commands import pg
from gppylib.gparray import GpArray
logger = gplog.get_default_logger()
gplog.enable_verbose_logging()
@skipIfDatabaseDown()
class PgCommandsTestCase(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testReadPostmasterTempFile(self):
logger.info("testReadPostmasterTempFile")
url = dbconn.DbURL()
gpdb = GpArray.initFromCatalog(url)
logger.info("Search for valid master port: %s" % gpdb.master.port)
cmd = pg.ReadPostmasterTempFile.local('test pg tempfile read',gpdb.master.port)
(exists,PID,datadir)=cmd.getResults()
logger.info("exists:=%s PID=%d datadir='%s'" % (exists,PID,datadir))
self.assertTrue(exists)
self.assertTrue(PID > 0)
self.assertEquals(datadir,gpdb.master.datadir)
gpdb.master.port=4000
logger.info("Search for bogus master port: %s" % gpdb.master.port)
cmd = pg.ReadPostmasterTempFile.local('test pg tempfile read',gpdb.master.port)
(exists,PID,datadir)=cmd.getResults()
logger.info("exists:=%s PID=%d datadir='%s'" % (exists,PID,datadir))
self.assertFalse(exists)
#------------------------------- Mainline --------------------------------
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 954,840,922,570,454,900 | 28.5 | 95 | 0.607832 | false |
FrancisLab/bing_image_archiver | iorise_image_extractor.py | 1 | 4682 | import re
import urllib
from HTMLParser import HTMLParser
class BlogAttachmentPageParser(HTMLParser):
"""HTMLParser used to extract the url of Bing images from a Blog Post Attachment Page from www.iorise.com
(e.g.: http://www.iorise.com/blog/?attachment_id=44)"""
def __init__(self, result_list):
""" Constructor: Initialize parser. """
HTMLParser.__init__(self)
self.result_list = result_list
        # Regex used to validate the href attribute of <a> tags
self.href_chk = re.compile('^http://www[.]iorise[.]com/blog/wp-content/uploads/20[0-9]{2}/[01][0-9]/.+[.](jpg|jpeg)$')
def handle_starttag(self, tag, attrs):
""" Method called when the parser encounter a start tag. """
# The url to the image will be in an achor tag
if tag == 'a':
# Check if we are currently at the right a tag
if self.validate_a_tag(attrs):
for attr_name, attr_value in attrs:
if attr_name == 'href':
self.result_list.append(attr_value)
def validate_a_tag(self, attrs):
""" Method called to check if a <a> tag and its attributes correspond to what we're looking for. """
href_ok = False
rel_ok = False
for attribute_name, value in attrs:
# Check the href
if attribute_name == 'href':
if self.href_chk.match(value):
href_ok = True
# Check the rel
elif attribute_name == 'rel':
#if value == 'attachment':
# rel_ok = True
return False
# The tag should not contain any more attributes
else:
return False
return href_ok
class BlogDayPageParser(HTMLParser):
"""HTMLParser used to extract the url of attachment page containing the Bing images from a Day Page from
www.iorise.com (e.g.: http://www.iorise.com/blog/?m=20121125)"""
def __init__(self, result_list):
""" Constructor: Initialize parser. """
HTMLParser.__init__(self)
self.result_list = result_list
        # Regex used to validate the href attribute of <a> tags
self.href_chk = re.compile('^http://www[.]iorise[.]com/(blog/)?[?]attachment_id=[0-9]+$')
self.rel_chk = re.compile('^attachment wp-att-[0-9]+$')
def handle_starttag(self, tag, attrs):
""" Method called when the parser encounter a start tag. """
# The url we are looking for will be in an <a> tag
if tag == 'a':
# Check if we are currently at the right a tag
if self.validate_a_tag(attrs):
for attr_name, attr_value in attrs:
if attr_name == 'href':
self.result_list.append(attr_value)
def validate_a_tag(self, attrs):
""" Method called to check if a <a> tag and its attributes correspond to what we're looking for. """
href_ok = False
rel_ok = False
for attribute_name, value in attrs:
# Check the href
if attribute_name == 'href':
if self.href_chk.match(value):
href_ok = True
# Check the rel
elif attribute_name == 'rel':
if self.rel_chk.match(value):
rel_ok = True
# The tag should not contain any more attributes
else:
return False
return href_ok and rel_ok
def extract_all_image_urls(date_to_extract):
""" Function used to extract all Bing images of the day published on iorise between the two provided dates. """
url = "http://www.iorise.com/blog/?m={year}{month:02}{day:02}".format(year=date_to_extract.year,
month=date_to_extract.month,
day=date_to_extract.day)
try:
page = urllib.urlopen(url)
except:
return []
# Extract attachment pages from day page
attachment_pages_url = []
day_page_parser = BlogDayPageParser(attachment_pages_url)
day_page_parser.feed(page.read().decode('UTF-8'))
all_image_urls = []
# For each attachment page, extract the image urls
for page_url in attachment_pages_url:
try:
attachment_page = urllib.urlopen(page_url)
except:
continue
image_urls = []
parser = BlogAttachmentPageParser(image_urls)
parser.feed(attachment_page.read().decode('UTF-8'))
all_image_urls += image_urls
return all_image_urls
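# Illustrative usage sketch (not part of the original script; assumes the blog
# layout targeted by the parsers above is still reachable):
#
#   import datetime
#   urls = extract_all_image_urls(datetime.date(2012, 11, 25))
#   for url in urls:
#       urllib.urlretrieve(url, url.rsplit('/', 1)[-1])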
| mit | 3,137,355,662,316,118,000 | 31.513889 | 126 | 0.559376 | false |
Distrotech/intellij-community | python/testData/inspections/PyTypeCheckerInspection/Generator.py | 25 | 3013 | def test():
def gen(n):
for x in xrange(n):
yield str(x)
def f_1(xs):
"""
:type xs: list of int
"""
return xs
def f_2(xs):
"""
:type xs: collections.Sequence of int
"""
return xs
def f_3(xs):
"""
:type xs: collections.Container of int
"""
return xs
def f_4(xs):
"""
:type xs: collections.Iterator of int
"""
return xs
def f_5(xs):
"""
:type xs: collections.Iterable of int
"""
return xs
def f_6(xs):
"""
:type xs: list
"""
return xs
def f_7(xs):
"""
:type xs: collections.Sequence
"""
return xs
def f_8(xs):
"""
:type xs: collections.Container
"""
return xs
def f_9(xs):
"""
:type xs: collections.Iterator
"""
return xs
def f_10(xs):
"""
:type xs: collections.Iterable
"""
return xs
def f_11(xs):
"""
:type xs: list of string
"""
return xs
def f_12(xs):
"""
:type xs: collections.Sequence of string
"""
return xs
def f_13(xs):
"""
:type xs: collections.Container of string
"""
return xs
def f_14(xs):
"""
:type xs: collections.Iterator of string
"""
return xs
def f_15(xs):
"""
:type xs: collections.Iterable of string
"""
return xs
return [
''.join(gen(10)),
f_1(<warning descr="Expected type 'list[int]', got '__generator[str]' instead">gen(11)</warning>),
f_2(<warning descr="Expected type 'Sequence[int]', got '__generator[str]' instead">gen(11)</warning>),
f_3(<warning descr="Expected type 'Container[int]', got '__generator[str]' instead">gen(11)</warning>),
f_4(<warning descr="Expected type 'Iterator[int]', got '__generator[str]' instead">gen(11)</warning>),
f_5(<warning descr="Expected type 'Iterable[int]', got '__generator[str]' instead">gen(11)</warning>),
f_6(<warning descr="Expected type 'list', got '__generator[str]' instead">gen(11)</warning>),
f_7(<warning descr="Expected type 'Sequence', got '__generator[str]' instead">gen(11)</warning>),
f_8(<warning descr="Expected type 'Container', got '__generator[str]' instead">gen(11)</warning>),
f_9(gen(11)),
f_10(gen(11)),
f_11(<warning descr="Expected type 'list[Union[str, unicode]]', got '__generator[str]' instead">gen(11)</warning>),
f_12(<warning descr="Expected type 'Sequence[Union[str, unicode]]', got '__generator[str]' instead">gen(11)</warning>),
f_13(<warning descr="Expected type 'Container[Union[str, unicode]]', got '__generator[str]' instead">gen(11)</warning>),
f_14(gen(11)),
f_15(gen(11)),
f_15('foo'.split('o')),
]
| apache-2.0 | -2,737,595,962,968,749,600 | 29.744898 | 128 | 0.50614 | false |
gbourdin/charlas | ml-notebook-to-prod/examples/mnistExample/mnistapi/mnistapi/__main__.py | 1 | 1833 | import click
from os import cpu_count, environ, execvp
from sys import prefix
from .app import create_app
@click.group()
def cli():
pass
@cli.command()
def run_uwsgi():
"""
Run API through uwsgi server.
"""
# avoid fork problems with tensorflow (and other non-serializable objects)
environ['UWSGI_LAZY_APPS'] = '1'
# explicit http timeout
environ['UWSGI_HTTP_TIMEOUT'] = '60'
# how many uwsgi workers (processes)
environ['UWSGI_WORKERS'] = '{}'.format(cpu_count())
# create one thread for every process, since you're probably cpu-bound
environ['UWSGI_THREADS'] = '1'
    # load the mnistapi WSGI handler
environ['UWSGI_MODULE'] = 'mnistapi.wsgi:app'
# bind the http server, DO NOT USE UWSGI_HTTP_SOCKET!!!
environ['UWSGI_HTTP'] = ':8081'
# remove sockets/pidfile at exit
environ['UWSGI_VACUUM'] = '1'
# retrieve/set the PythonHome
environ['UWSGI_PYHOME'] = prefix
# increase buffer size a bit
environ['UWSGI_BUFFER_SIZE'] = '8192'
# enable the master process
environ['UWSGI_MASTER'] = '1'
# disable ping logging
environ['UWSGI_ROUTE'] = '^/ping donotlog:'
# keep connection from balancer alive
environ['UWSGI_HTTP_KEEPALIVE'] = '1'
# slower but safe
environ['UWSGI_THUNDER_LOCK'] = '1'
# do not log every request, it's slow and verbose
environ['UWSGI_DISABLE_LOGGING'] = '1'
# close uwsgi if something goes wrong, otherwise uwsgi starts with no app
environ['UWSGI_NEED_APP'] = '1'
# exec the uwsgi binary
execvp('uwsgi', ('uwsgi',))
@cli.command()
@click.option('-h', '--host', 'host', default='localhost')
def run_server(host):
app = create_app()
app.run(debug=True, host=host, port=8080, threaded=False, processes=1, use_reloader=False)
if __name__ == '__main__':
cli()
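# Illustrative invocation sketch (command names are assumptions; click may expose
# them as run_uwsgi/run_server or run-uwsgi/run-server depending on its version):
#
#   python -m mnistapi run_uwsgi                    # serve through uwsgi on :8081
#   python -m mnistapi run_server --host 0.0.0.0    # development server on :8080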
| gpl-2.0 | -5,149,527,838,607,180,000 | 29.55 | 94 | 0.648663 | false |
Walesson/angular2-movie-app | node_modules/node-forge/tests/forge_ssl/forge/ssl.py | 169 | 16598 | # Wrapper module for _ssl, providing some additional facilities
# implemented in Python. Written by Bill Janssen.
"""\
This module provides some more Pythonic support for SSL.
Object types:
SSLSocket -- subtype of socket.socket which does SSL over the socket
Exceptions:
SSLError -- exception raised for I/O errors
Functions:
cert_time_to_seconds -- convert time string used for certificate
notBefore and notAfter functions to integer
seconds past the Epoch (the time values
returned from time.time())
  get_server_certificate (HOST, PORT) -- fetch the certificate provided
                          by the server running on HOST at port PORT.  No
                          validation of the certificate is performed.
Integer constants:
SSL_ERROR_ZERO_RETURN
SSL_ERROR_WANT_READ
SSL_ERROR_WANT_WRITE
SSL_ERROR_WANT_X509_LOOKUP
SSL_ERROR_SYSCALL
SSL_ERROR_SSL
SSL_ERROR_WANT_CONNECT
SSL_ERROR_EOF
SSL_ERROR_INVALID_ERROR_CODE
The following group defines certificate requirements that one side is
allowing/requiring from the other side:
CERT_NONE - no certificates from the other side are required (or will
be looked at if provided)
CERT_OPTIONAL - certificates are not required, but if provided will be
validated, and if validation fails, the connection will
also fail
CERT_REQUIRED - certificates are required, and will be validated, and
if validation fails, the connection will also fail
The following constants identify various SSL protocol variants:
PROTOCOL_SSLv2
PROTOCOL_SSLv3
PROTOCOL_SSLv23
PROTOCOL_TLSv1
The following constants identify various SSL session caching modes:
SESS_CACHE_OFF
SESS_CACHE_CLIENT
SESS_CACHE_SERVER
SESS_CACHE_BOTH
"""
import textwrap
import _forge_ssl # if we can't import it, let the error propagate
from _forge_ssl import SSLError
from _forge_ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
from _forge_ssl import PROTOCOL_SSLv2, PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
from _forge_ssl import SESS_CACHE_OFF, SESS_CACHE_CLIENT, SESS_CACHE_SERVER, SESS_CACHE_BOTH
from _forge_ssl import RAND_status, RAND_egd, RAND_add
from _forge_ssl import \
SSL_ERROR_ZERO_RETURN, \
SSL_ERROR_WANT_READ, \
SSL_ERROR_WANT_WRITE, \
SSL_ERROR_WANT_X509_LOOKUP, \
SSL_ERROR_SYSCALL, \
SSL_ERROR_SSL, \
SSL_ERROR_WANT_CONNECT, \
SSL_ERROR_EOF, \
SSL_ERROR_INVALID_ERROR_CODE
from socket import socket, _fileobject, _delegate_methods
from socket import error as socket_error
from socket import getnameinfo as _getnameinfo
import base64 # for DER-to-PEM translation
import errno
class SSLSocket(socket):
"""This class implements a subtype of socket.socket that wraps
the underlying OS socket in an SSL context when necessary, and
provides read and write methods over that channel."""
def __init__(self, parent_socket, sock, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_SSLv23,
sess_cache_mode=SESS_CACHE_SERVER,
sess_id_ctx=None,
ca_certs=None,
do_handshake_on_connect=True,
suppress_ragged_eofs=True):
socket.__init__(self, _sock=sock._sock)
# The initializer for socket overrides the methods send(), recv(), etc.
        # in the instance, which we don't need -- but we want to provide the
# methods defined in SSLSocket.
for attr in _delegate_methods:
try:
delattr(self, attr)
except AttributeError:
pass
if certfile and not keyfile:
keyfile = certfile
create = True
connected = False
if not server_side:
# see if it's connected
try:
socket.getpeername(self)
connected = True
except socket_error, e:
if e.errno != errno.ENOTCONN:
raise
# no, no connection yet
self._sslobj = None
create = False
if create:
# yes, create the SSL object
if parent_socket == None:
self._sslobj = _forge_ssl.sslwrap(
self._sock,
server_side,
keyfile, certfile,
cert_reqs, ssl_version,
sess_cache_mode, sess_id_ctx,
ca_certs)
else:
self._sslobj = parent_socket._sslobj.wrap_accepted(self._sock)
if connected and do_handshake_on_connect:
self.do_handshake()
self.keyfile = keyfile
self.certfile = certfile
self.cert_reqs = cert_reqs
self.ssl_version = ssl_version
self.sess_cache_mode = sess_cache_mode
self.sess_id_ctx = sess_id_ctx
self.ca_certs = ca_certs
self.do_handshake_on_connect = do_handshake_on_connect
self.suppress_ragged_eofs = suppress_ragged_eofs
self._makefile_refs = 0
def read(self, len=1024):
"""Read up to LEN bytes and return them.
Return zero-length string on EOF."""
try:
return self._sslobj.read(len)
except SSLError, x:
if x.args[0] == SSL_ERROR_EOF and self.suppress_ragged_eofs:
return ''
else:
raise
def write(self, data):
"""Write DATA to the underlying SSL channel. Returns
number of bytes of DATA actually transmitted."""
return self._sslobj.write(data)
def getpeercert(self, binary_form=False):
"""Returns a formatted version of the data in the
certificate provided by the other end of the SSL channel.
Return None if no certificate was provided, {} if a
certificate was provided, but not validated."""
return self._sslobj.peer_certificate(binary_form)
def cipher(self):
if not self._sslobj:
return None
else:
return self._sslobj.cipher()
def send(self, data, flags=0):
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to send() on %s" %
self.__class__)
while True:
try:
v = self._sslobj.write(data)
except SSLError, x:
if x.args[0] == SSL_ERROR_WANT_READ:
return 0
elif x.args[0] == SSL_ERROR_WANT_WRITE:
return 0
else:
raise
else:
return v
else:
return socket.send(self, data, flags)
def sendto(self, data, addr, flags=0):
if self._sslobj:
raise ValueError("sendto not allowed on instances of %s" %
self.__class__)
else:
return socket.sendto(self, data, addr, flags)
def sendall(self, data, flags=0):
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to sendall() on %s" %
self.__class__)
amount = len(data)
count = 0
while (count < amount):
v = self.send(data[count:])
count += v
return amount
else:
return socket.sendall(self, data, flags)
def recv(self, buflen=1024, flags=0):
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to recv() on %s" %
self.__class__)
return self.read(buflen)
else:
return socket.recv(self, buflen, flags)
def recv_into(self, buffer, nbytes=None, flags=0):
if buffer and (nbytes is None):
nbytes = len(buffer)
elif nbytes is None:
nbytes = 1024
if self._sslobj:
if flags != 0:
raise ValueError(
"non-zero flags not allowed in calls to recv_into() on %s" %
self.__class__)
tmp_buffer = self.read(nbytes)
v = len(tmp_buffer)
buffer[:v] = tmp_buffer
return v
else:
return socket.recv_into(self, buffer, nbytes, flags)
def recvfrom(self, addr, buflen=1024, flags=0):
if self._sslobj:
raise ValueError("recvfrom not allowed on instances of %s" %
self.__class__)
else:
return socket.recvfrom(self, addr, buflen, flags)
def recvfrom_into(self, buffer, nbytes=None, flags=0):
if self._sslobj:
raise ValueError("recvfrom_into not allowed on instances of %s" %
self.__class__)
else:
return socket.recvfrom_into(self, buffer, nbytes, flags)
def pending(self):
if self._sslobj:
return self._sslobj.pending()
else:
return 0
def unwrap(self):
if self._sslobj:
try:
# if connected then shutdown
self.getpeername()
s = self._sslobj.shutdown()
except:
s = self._sock
self._sslobj = None
return s
else:
raise ValueError("No SSL wrapper around " + str(self))
def shutdown(self, how):
self._sslobj = None
socket.shutdown(self, how)
def close(self):
if self._makefile_refs < 1:
if self._sslobj:
self.unwrap()
socket.close(self)
else:
self._makefile_refs -= 1
def do_handshake(self):
"""Perform a TLS/SSL handshake."""
self._sslobj.do_handshake()
def connect(self, addr):
"""Connects to remote ADDR, and then wraps the connection in
an SSL channel."""
# Here we assume that the socket is client-side, and not
# connected at the time of the call. We connect it, then wrap it.
if self._sslobj:
raise ValueError("attempt to connect already-connected SSLSocket!")
socket.connect(self, addr)
self._sslobj = _forge_ssl.sslwrap(self._sock, False,
self.keyfile, self.certfile,
self.cert_reqs, self.ssl_version,
self.sess_cache_mode,
self.sess_id_ctx,
self.ca_certs)
if self.do_handshake_on_connect:
self.do_handshake()
def accept(self):
"""Accepts a new connection from a remote client, and returns
a tuple containing that new connection wrapped with a server-side
SSL channel, and the address of the remote client."""
newsock, addr = socket.accept(self)
return (SSLSocket(self,
newsock,
keyfile=self.keyfile,
certfile=self.certfile,
server_side=True,
cert_reqs=self.cert_reqs,
ssl_version=self.ssl_version,
sess_cache_mode=self.sess_cache_mode,
sess_id_ctx=self.sess_id_ctx,
ca_certs=self.ca_certs,
do_handshake_on_connect=self.do_handshake_on_connect,
suppress_ragged_eofs=self.suppress_ragged_eofs),
addr)
def makefile(self, mode='r', bufsize=-1):
"""Make and return a file-like object that
works with the SSL connection. Just use the code
from the socket module."""
self._makefile_refs += 1
# close=True so as to decrement the reference count when done with
# the file-like object.
return _fileobject(self, mode, bufsize, close=True)
def wrap_socket(sock, parent_socket=None, keyfile=None, certfile=None,
server_side=False, cert_reqs=CERT_NONE,
ssl_version=PROTOCOL_SSLv23,
sess_cache_mode=SESS_CACHE_SERVER,
sess_id_ctx=None,
ca_certs=None,
do_handshake_on_connect=True,
suppress_ragged_eofs=True):
return SSLSocket(parent_socket,
sock, keyfile=keyfile, certfile=certfile,
server_side=server_side, cert_reqs=cert_reqs,
ssl_version=ssl_version,
sess_cache_mode=sess_cache_mode,
sess_id_ctx=sess_id_ctx,
ca_certs=ca_certs,
do_handshake_on_connect=do_handshake_on_connect,
suppress_ragged_eofs=suppress_ragged_eofs)
# some utility functions
def cert_time_to_seconds(cert_time):
"""Takes a date-time string in standard ASN1_print form
("MON DAY 24HOUR:MINUTE:SEC YEAR TIMEZONE") and return
a Python time value in seconds past the epoch."""
import time
return time.mktime(time.strptime(cert_time, "%b %d %H:%M:%S %Y GMT"))
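# Illustrative example (input format as accepted by the strptime call above):
#   cert_time_to_seconds("May  9 00:00:00 2007 GMT") returns the corresponding
#   timestamp as a float, comparable with time.time(). Note that time.mktime()
#   interprets the parsed value in local time.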
PEM_HEADER = "-----BEGIN CERTIFICATE-----"
PEM_FOOTER = "-----END CERTIFICATE-----"
def DER_cert_to_PEM_cert(der_cert_bytes):
"""Takes a certificate in binary DER format and returns the
PEM version of it as a string."""
if hasattr(base64, 'standard_b64encode'):
# preferred because older API gets line-length wrong
f = base64.standard_b64encode(der_cert_bytes)
return (PEM_HEADER + '\n' +
textwrap.fill(f, 64) + '\n' +
PEM_FOOTER + '\n')
else:
return (PEM_HEADER + '\n' +
base64.encodestring(der_cert_bytes) +
PEM_FOOTER + '\n')
def PEM_cert_to_DER_cert(pem_cert_string):
"""Takes a certificate in ASCII PEM format and returns the
DER-encoded version of it as a byte sequence"""
if not pem_cert_string.startswith(PEM_HEADER):
raise ValueError("Invalid PEM encoding; must start with %s"
% PEM_HEADER)
if not pem_cert_string.strip().endswith(PEM_FOOTER):
raise ValueError("Invalid PEM encoding; must end with %s"
% PEM_FOOTER)
d = pem_cert_string.strip()[len(PEM_HEADER):-len(PEM_FOOTER)]
return base64.decodestring(d)
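# Illustrative round trip (assumes `der` holds DER-encoded certificate bytes
# obtained elsewhere):
#   pem = DER_cert_to_PEM_cert(der)
#   assert PEM_cert_to_DER_cert(pem) == der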
def get_server_certificate(addr, ssl_version=PROTOCOL_SSLv3, ca_certs=None):
"""Retrieve the certificate from the server at the specified address,
and return it as a PEM-encoded string.
If 'ca_certs' is specified, validate the server cert against it.
If 'ssl_version' is specified, use it in the connection attempt."""
host, port = addr
if (ca_certs is not None):
cert_reqs = CERT_REQUIRED
else:
cert_reqs = CERT_NONE
s = wrap_socket(socket(), ssl_version=ssl_version,
cert_reqs=cert_reqs, ca_certs=ca_certs)
s.connect(addr)
dercert = s.getpeercert(True)
s.close()
return DER_cert_to_PEM_cert(dercert)
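# Illustrative call (network access and host name are assumptions):
#   pem = get_server_certificate(("example.com", 443))
#   # pem now holds "-----BEGIN CERTIFICATE-----\n..." as a PEM string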
def get_protocol_name(protocol_code):
if protocol_code == PROTOCOL_TLSv1:
return "TLSv1"
elif protocol_code == PROTOCOL_SSLv23:
return "SSLv23"
elif protocol_code == PROTOCOL_SSLv2:
return "SSLv2"
elif protocol_code == PROTOCOL_SSLv3:
return "SSLv3"
else:
return "<unknown>"
# a replacement for the old socket.ssl function
def sslwrap_simple(sock, keyfile=None, certfile=None):
"""A replacement for the old socket.ssl function. Designed
    for compatibility with Python 2.5 and earlier.  Will disappear in
Python 3.0."""
if hasattr(sock, "_sock"):
sock = sock._sock
ssl_sock = _forge_ssl.sslwrap(sock, 0, keyfile, certfile,
CERT_NONE, PROTOCOL_SSLv23,
SESS_CACHE_SERVER, None, None)
try:
sock.getpeername()
except:
# no, no connection yet
pass
else:
# yes, do the handshake
ssl_sock.do_handshake()
return ssl_sock
| gpl-3.0 | 1,523,085,738,166,268,200 | 33.152263 | 92 | 0.558983 | false |
aristotle-tek/cuny-bdif | AWS/ec2/lib/boto-2.34.0/tests/unit/vpc/test_routetable.py | 64 | 20068 | from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection, RouteTable
class TestDescribeRouteTables(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DescribeRouteTablesResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>6f570b0b-9c18-4b07-bdec-73740dcf861a</requestId>
<routeTableSet>
<item>
<routeTableId>rtb-13ad487a</routeTableId>
<vpcId>vpc-11ad4878</vpcId>
<routeSet>
<item>
<destinationCidrBlock>10.0.0.0/22</destinationCidrBlock>
<gatewayId>local</gatewayId>
<state>active</state>
<origin>CreateRouteTable</origin>
</item>
</routeSet>
<associationSet>
<item>
<routeTableAssociationId>rtbassoc-12ad487b</routeTableAssociationId>
<routeTableId>rtb-13ad487a</routeTableId>
<main>true</main>
</item>
</associationSet>
<tagSet/>
</item>
<item>
<routeTableId>rtb-f9ad4890</routeTableId>
<vpcId>vpc-11ad4878</vpcId>
<routeSet>
<item>
<destinationCidrBlock>10.0.0.0/22</destinationCidrBlock>
<gatewayId>local</gatewayId>
<state>active</state>
<origin>CreateRouteTable</origin>
</item>
<item>
<destinationCidrBlock>0.0.0.0/0</destinationCidrBlock>
<gatewayId>igw-eaad4883</gatewayId>
<state>active</state>
</item>
<item>
<destinationCidrBlock>10.0.0.0/21</destinationCidrBlock>
<networkInterfaceId>eni-884ec1d1</networkInterfaceId>
<state>blackhole</state>
<origin>CreateRoute</origin>
</item>
<item>
<destinationCidrBlock>11.0.0.0/22</destinationCidrBlock>
<vpcPeeringConnectionId>pcx-efc52b86</vpcPeeringConnectionId>
<state>blackhole</state>
<origin>CreateRoute</origin>
</item>
</routeSet>
<associationSet>
<item>
<routeTableAssociationId>rtbassoc-faad4893</routeTableAssociationId>
<routeTableId>rtb-f9ad4890</routeTableId>
<subnetId>subnet-15ad487c</subnetId>
</item>
</associationSet>
<tagSet/>
</item>
</routeTableSet>
</DescribeRouteTablesResponse>
"""
def test_get_all_route_tables(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.get_all_route_tables(
['rtb-13ad487a', 'rtb-f9ad4890'], filters=[('route.state', 'active')])
self.assert_request_parameters({
'Action': 'DescribeRouteTables',
'RouteTableId.1': 'rtb-13ad487a',
'RouteTableId.2': 'rtb-f9ad4890',
'Filter.1.Name': 'route.state',
'Filter.1.Value.1': 'active'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(len(api_response), 2)
self.assertIsInstance(api_response[0], RouteTable)
self.assertEquals(api_response[0].id, 'rtb-13ad487a')
self.assertEquals(len(api_response[0].routes), 1)
self.assertEquals(api_response[0].routes[0].destination_cidr_block, '10.0.0.0/22')
self.assertEquals(api_response[0].routes[0].gateway_id, 'local')
self.assertEquals(api_response[0].routes[0].state, 'active')
self.assertEquals(len(api_response[0].associations), 1)
self.assertEquals(api_response[0].associations[0].id, 'rtbassoc-12ad487b')
self.assertEquals(api_response[0].associations[0].route_table_id, 'rtb-13ad487a')
self.assertIsNone(api_response[0].associations[0].subnet_id)
self.assertEquals(api_response[0].associations[0].main, True)
self.assertEquals(api_response[1].id, 'rtb-f9ad4890')
self.assertEquals(len(api_response[1].routes), 4)
self.assertEquals(api_response[1].routes[0].destination_cidr_block, '10.0.0.0/22')
self.assertEquals(api_response[1].routes[0].gateway_id, 'local')
self.assertEquals(api_response[1].routes[0].state, 'active')
self.assertEquals(api_response[1].routes[1].destination_cidr_block, '0.0.0.0/0')
self.assertEquals(api_response[1].routes[1].gateway_id, 'igw-eaad4883')
self.assertEquals(api_response[1].routes[1].state, 'active')
self.assertEquals(api_response[1].routes[2].destination_cidr_block, '10.0.0.0/21')
self.assertEquals(api_response[1].routes[2].interface_id, 'eni-884ec1d1')
self.assertEquals(api_response[1].routes[2].state, 'blackhole')
self.assertEquals(api_response[1].routes[3].destination_cidr_block, '11.0.0.0/22')
self.assertEquals(api_response[1].routes[3].vpc_peering_connection_id, 'pcx-efc52b86')
self.assertEquals(api_response[1].routes[3].state, 'blackhole')
self.assertEquals(len(api_response[1].associations), 1)
self.assertEquals(api_response[1].associations[0].id, 'rtbassoc-faad4893')
self.assertEquals(api_response[1].associations[0].route_table_id, 'rtb-f9ad4890')
self.assertEquals(api_response[1].associations[0].subnet_id, 'subnet-15ad487c')
self.assertEquals(api_response[1].associations[0].main, False)
class TestAssociateRouteTable(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<AssociateRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<associationId>rtbassoc-f8ad4891</associationId>
</AssociateRouteTableResponse>
"""
def test_associate_route_table(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.associate_route_table(
'rtb-e4ad488d', 'subnet-15ad487c')
self.assert_request_parameters({
'Action': 'AssociateRouteTable',
'RouteTableId': 'rtb-e4ad488d',
'SubnetId': 'subnet-15ad487c'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, 'rtbassoc-f8ad4891')
class TestDisassociateRouteTable(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DisassociateRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DisassociateRouteTableResponse>
"""
def test_disassociate_route_table(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.disassociate_route_table('rtbassoc-fdad4894')
self.assert_request_parameters({
'Action': 'DisassociateRouteTable',
'AssociationId': 'rtbassoc-fdad4894'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestCreateRouteTable(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<routeTable>
<routeTableId>rtb-f9ad4890</routeTableId>
<vpcId>vpc-11ad4878</vpcId>
<routeSet>
<item>
<destinationCidrBlock>10.0.0.0/22</destinationCidrBlock>
<gatewayId>local</gatewayId>
<state>active</state>
</item>
</routeSet>
<associationSet/>
<tagSet/>
</routeTable>
</CreateRouteTableResponse>
"""
def test_create_route_table(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_route_table('vpc-11ad4878')
self.assert_request_parameters({
'Action': 'CreateRouteTable',
'VpcId': 'vpc-11ad4878'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertIsInstance(api_response, RouteTable)
self.assertEquals(api_response.id, 'rtb-f9ad4890')
self.assertEquals(len(api_response.routes), 1)
self.assertEquals(api_response.routes[0].destination_cidr_block, '10.0.0.0/22')
self.assertEquals(api_response.routes[0].gateway_id, 'local')
self.assertEquals(api_response.routes[0].state, 'active')
class TestDeleteRouteTable(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DeleteRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteRouteTableResponse>
"""
def test_delete_route_table(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.delete_route_table('rtb-e4ad488d')
self.assert_request_parameters({
'Action': 'DeleteRouteTable',
'RouteTableId': 'rtb-e4ad488d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestReplaceRouteTableAssociation(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<ReplaceRouteTableAssociationResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<newAssociationId>rtbassoc-faad4893</newAssociationId>
</ReplaceRouteTableAssociationResponse>
"""
def test_replace_route_table_assocation(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route_table_assocation(
'rtbassoc-faad4893', 'rtb-f9ad4890')
self.assert_request_parameters({
'Action': 'ReplaceRouteTableAssociation',
'AssociationId': 'rtbassoc-faad4893',
'RouteTableId': 'rtb-f9ad4890'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_replace_route_table_association_with_assoc(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route_table_association_with_assoc(
'rtbassoc-faad4893', 'rtb-f9ad4890')
self.assert_request_parameters({
'Action': 'ReplaceRouteTableAssociation',
'AssociationId': 'rtbassoc-faad4893',
'RouteTableId': 'rtb-f9ad4890'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, 'rtbassoc-faad4893')
class TestCreateRoute(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateRouteResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</CreateRouteResponse>
"""
def test_create_route_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_route(
'rtb-e4ad488d', '0.0.0.0/0', gateway_id='igw-eaad4883')
self.assert_request_parameters({
'Action': 'CreateRoute',
'RouteTableId': 'rtb-e4ad488d',
'DestinationCidrBlock': '0.0.0.0/0',
'GatewayId': 'igw-eaad4883'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_create_route_instance(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_route(
'rtb-g8ff4ea2', '0.0.0.0/0', instance_id='i-1a2b3c4d')
self.assert_request_parameters({
'Action': 'CreateRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'InstanceId': 'i-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_create_route_interface(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_route(
'rtb-g8ff4ea2', '0.0.0.0/0', interface_id='eni-1a2b3c4d')
self.assert_request_parameters({
'Action': 'CreateRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'NetworkInterfaceId': 'eni-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_create_route_vpc_peering_connection(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_route(
'rtb-g8ff4ea2', '0.0.0.0/0', vpc_peering_connection_id='pcx-1a2b3c4d')
self.assert_request_parameters({
'Action': 'CreateRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'VpcPeeringConnectionId': 'pcx-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestReplaceRoute(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateRouteResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</CreateRouteResponse>
"""
def test_replace_route_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route(
'rtb-e4ad488d', '0.0.0.0/0', gateway_id='igw-eaad4883')
self.assert_request_parameters({
'Action': 'ReplaceRoute',
'RouteTableId': 'rtb-e4ad488d',
'DestinationCidrBlock': '0.0.0.0/0',
'GatewayId': 'igw-eaad4883'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_replace_route_instance(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route(
'rtb-g8ff4ea2', '0.0.0.0/0', instance_id='i-1a2b3c4d')
self.assert_request_parameters({
'Action': 'ReplaceRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'InstanceId': 'i-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_replace_route_interface(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route(
'rtb-g8ff4ea2', '0.0.0.0/0', interface_id='eni-1a2b3c4d')
self.assert_request_parameters({
'Action': 'ReplaceRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'NetworkInterfaceId': 'eni-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_replace_route_vpc_peering_connection(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.replace_route(
'rtb-g8ff4ea2', '0.0.0.0/0', vpc_peering_connection_id='pcx-1a2b3c4d')
self.assert_request_parameters({
'Action': 'ReplaceRoute',
'RouteTableId': 'rtb-g8ff4ea2',
'DestinationCidrBlock': '0.0.0.0/0',
'VpcPeeringConnectionId': 'pcx-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestDeleteRoute(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DeleteRouteTableResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteRouteTableResponse>
"""
def test_delete_route(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.delete_route('rtb-e4ad488d', '172.16.1.0/24')
self.assert_request_parameters({
'Action': 'DeleteRoute',
'RouteTableId': 'rtb-e4ad488d',
'DestinationCidrBlock': '172.16.1.0/24'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
if __name__ == '__main__':
unittest.main()
| mit | -4,407,155,660,732,727,000 | 44.609091 | 99 | 0.568019 | false |
rc0r/afl-utils | afl_utils/afl_stats.py | 1 | 15342 | """
Copyright 2015-2016 @_rc0r <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
try:
import simplejson as json
except ImportError:
import json
import os
import sys
import socket
import twitter
from urllib.error import URLError
import afl_utils
from afl_utils.AflPrettyPrint import clr, print_ok, print_warn, print_err
from db_connectors import con_sqlite
db_table_spec = """`id` INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, `last_update` INTEGER NOT NULL, `start_time`INTEGER NOT NULL,
`fuzzer_pid` INTEGER NOT NULL, `cycles_done` INTEGER NOT NULL, `execs_done` INTEGER NOT NULL,
`execs_per_sec` REAL NOT NULL, `paths_total` INTEGER NOT NULL, `paths_favored` INTEGER NOT NULL,
`paths_found` INTEGER NOT NULL, `paths_imported` INTEGER NOT NULL, `max_depth` INTEGER NOT NULL,
`cur_path` INTEGER NOT NULL, `pending_favs` INTEGER NOT NULL, `pending_total` INTEGER NOT NULL,
`variable_paths` INTEGER NOT NULL, `stability` REAL, `bitmap_cvg` REAL NOT NULL,
`unique_crashes` INTEGER NOT NULL, `unique_hangs` INTEGER NOT NULL, `last_path` INTEGER NOT NULL,
`last_crash` INTEGER NOT NULL, `last_hang` INTEGER NOT NULL, `execs_since_crash` INTEGER NOT NULL,
`exec_timeout` INTEGER NOT NULL, `afl_banner` VARCHAR(200) NOT NULL, `afl_version` VARCHAR(10) NOT NULL,
`command_line` VARCHAR(1000)"""
def show_info():
print(clr.CYA + "afl-stats " + clr.BRI + "%s" % afl_utils.__version__ + clr.RST + " by %s" % afl_utils.__author__)
print("Send stats of afl-fuzz jobs to Twitter.")
print("")
def read_config(config_file):
config_file = os.path.abspath(os.path.expanduser(config_file))
if not os.path.isfile(config_file):
print_err("Config file not found!")
sys.exit(1)
with open(config_file, 'r') as raw_config:
config = json.load(raw_config)
return config
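# Illustrative sketch (not part of the original file): the config file loaded by
# read_config() is plain JSON.  The key names below are taken from how the settings
# are used in twitter_init(), dump_stats() and fetch_stats(); the values are made-up
# placeholders.
#
#   {
#       "fuzz_dirs": ["~/sync_dir"],
#       "twitter_creds_file": "~/.afl-stats.creds",
#       "twitter_consumer_key": "<consumer_key>",
#       "twitter_consumer_secret": "<consumer_secret>"
#   }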
def twitter_init(config):
try:
config['twitter_creds_file'] = os.path.abspath(os.path.expanduser(config['twitter_creds_file']))
if not os.path.exists(config['twitter_creds_file']):
twitter.oauth_dance("fuzzer_stats", config['twitter_consumer_key'],
config['twitter_consumer_secret'], config['twitter_creds_file'])
oauth_token, oauth_secret = twitter.read_token_file(config['twitter_creds_file'])
twitter_instance = twitter.Twitter(auth=twitter.OAuth(oauth_token, oauth_secret,
config['twitter_consumer_key'],
config['twitter_consumer_secret']))
return twitter_instance
except (twitter.TwitterHTTPError, URLError):
print_err("Network error, twitter login failed! Check your connection!")
sys.exit(1)
def shorten_tweet(tweet):
if len(tweet) > 140:
print_ok("Status too long, will be shortened to 140 chars!")
short_tweet = tweet[:137] + "..."
else:
short_tweet = tweet
return short_tweet
def fuzzer_alive(pid):
try:
os.kill(pid, 0)
except (OSError, ProcessLookupError):
return 0
return 1
def parse_stat_file(stat_file, summary=True):
try:
f = open(stat_file, "r")
lines = f.readlines()
f.close()
summary_stats = {
'fuzzer_pid': None,
'execs_done': None,
'execs_per_sec': None,
'paths_total': None,
'paths_favored': None,
'pending_favs': None,
'pending_total': None,
'unique_crashes': None,
'unique_hangs': None,
'afl_banner': None
}
complete_stats = {
'last_update': '',
'start_time': '',
'fuzzer_pid': '',
'cycles_done': '',
'execs_done': '',
'execs_per_sec': '',
'paths_total': '',
'paths_favored': '',
'paths_found': '',
'paths_imported': '',
'max_depth': '',
'cur_path': '',
'pending_favs': '',
'pending_total': '',
'variable_paths': '',
'stability': '',
'bitmap_cvg': '',
'unique_crashes': '',
'unique_hangs': '',
'last_path': '',
'last_crash': '',
'last_hang': '',
'execs_since_crash': '',
'exec_timeout': '',
'afl_banner': '',
'afl_version': '',
'command_line': ''
}
for l in lines:
if summary:
stats = summary_stats
for k in stats.keys():
if k != "fuzzer_pid":
if k in l:
stats[k] = l[19:].strip(": \r\n")
else:
if k in l:
stats[k] = fuzzer_alive(int(l[19:].strip(": \r\n")))
else:
stats = complete_stats
for k in stats.keys():
if k in l:
stats[k] = l[19:].strip(": %\r\n")
return stats
except FileNotFoundError as e:
print_warn("Stat file " + clr.GRA + "%s" % e.filename + clr.RST + " not found!")
return None
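# Illustrative note (not part of the original file): afl-fuzz writes fuzzer_stats as
# fixed-width "key : value" lines, which is why the parser above slices each line
# with l[19:], e.g. (values are made up):
#     start_time        : 1438733512
#     execs_done        : 1234567
#     afl_banner        : target_000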
def load_stats(fuzzer_dir, summary=True):
fuzzer_dir = os.path.abspath(os.path.expanduser(fuzzer_dir))
if not os.path.isdir(fuzzer_dir):
print_warn("Invalid fuzzing directory specified: " + clr.GRA + "%s" % fuzzer_dir + clr.RST)
return None
fuzzer_stats = []
if os.path.isfile(os.path.join(fuzzer_dir, "fuzzer_stats")):
# single afl-fuzz job
stats = parse_stat_file(os.path.join(fuzzer_dir, "fuzzer_stats"), summary)
if stats:
fuzzer_stats.append(stats)
else:
fuzzer_inst = []
for fdir in os.listdir(fuzzer_dir):
if os.path.isdir(os.path.join(fuzzer_dir, fdir)):
fuzzer_inst.append(os.path.join(fuzzer_dir, fdir, "fuzzer_stats"))
for stat_file in fuzzer_inst:
stats = parse_stat_file(stat_file, summary)
if stats:
fuzzer_stats.append(stats)
return fuzzer_stats
def summarize_stats(stats):
sum_stat = {
'fuzzers': len(stats),
'fuzzer_pid': 0,
'execs_done': 0,
'execs_per_sec': 0,
'paths_total': 0,
'paths_favored': 0,
'pending_favs': 0,
'pending_total': 0,
'unique_crashes': 0,
'unique_hangs': 0,
'afl_banner': 0,
'host': socket.gethostname()[:10]
}
for s in stats:
for k in sum_stat.keys():
if k in s.keys():
if k != "afl_banner":
sum_stat[k] += float(s[k])
else:
sum_stat[k] = s[k][:10]
return sum_stat
def diff_stats(sum_stats, old_stats):
if len(sum_stats) != len(old_stats):
print_warn("Stats corrupted for '" + clr.GRA + "%s" % sum_stats['afl_banner'] + clr.RST + "'!")
return None
diff_stat = {
'fuzzers': len(sum_stats),
'fuzzer_pid': 0,
'execs_done': 0,
'execs_per_sec': 0,
'paths_total': 0,
'paths_favored': 0,
'pending_favs': 0,
'pending_total': 0,
'unique_crashes': 0,
'unique_hangs': 0,
'afl_banner': 0,
'host': socket.gethostname()[:10]
}
for k in sum_stats.keys():
if k not in ['afl_banner', 'host']:
diff_stat[k] = sum_stats[k] - old_stats[k]
else:
diff_stat[k] = sum_stats[k]
return diff_stat
def prettify_stat(stat, dstat, console=True):
_stat = stat.copy()
_dstat = dstat.copy()
_stat['execs_done'] /= 1e6
_dstat['execs_done'] /= 1e6
if _dstat['fuzzer_pid'] == _dstat['fuzzers'] == 0:
ds_alive = ""
else:
ds_alive = " (%+d/%+d)" % (_dstat['fuzzer_pid'], _dstat['fuzzers'])
# if int(_dstat['execs_done']) == 0:
if _dstat['execs_done'] == 0:
ds_exec = " "
else:
ds_exec = " (%+d) " % _dstat['execs_done']
if _dstat['execs_per_sec'] == 0:
ds_speed = " "
else:
ds_speed = " (%+1.f) " % _dstat['execs_per_sec']
if _dstat['pending_total'] == _dstat['pending_favs'] == 0:
ds_pend = ""
else:
ds_pend = " (%+d/%+d)" % (_dstat['pending_total'], _dstat['pending_favs'])
if _dstat['unique_crashes'] == 0:
ds_crash = ""
else:
ds_crash = " (%+d)" % _dstat['unique_crashes']
if console:
# colorize stats
_stat['afl_banner'] = clr.BLU + _stat['afl_banner'] + clr.RST
_stat['host'] = clr.LBL + _stat['host'] + clr.RST
lbl = clr.GRA
if _stat['fuzzer_pid'] == 0:
alc = clr.LRD
slc = clr.GRA
else:
alc = clr.LGN if _stat['fuzzer_pid'] == _stat['fuzzers'] else clr.YEL
slc = ""
clc = clr.MGN if _stat['unique_crashes'] == 0 else clr.LRD
rst = clr.RST
# colorize diffs
if _dstat['fuzzer_pid'] < 0 or _dstat['fuzzers'] < 0:
ds_alive = clr.RED + ds_alive + clr.RST
else:
ds_alive = clr.GRN + ds_alive + clr.RST
# if int(_dstat['execs_done']) < 0:
if _dstat['execs_done'] < 0:
ds_exec = clr.RED + ds_exec + clr.RST
else:
ds_exec = clr.GRN + ds_exec + clr.RST
if _dstat['execs_per_sec'] < 0:
ds_speed = clr.RED + ds_speed + clr.RST
else:
ds_speed = clr.GRN + ds_speed + clr.RST
if _dstat['unique_crashes'] < 0:
ds_crash = clr.RED + ds_crash + clr.RST
else:
ds_crash = clr.GRN + ds_crash + clr.RST
ds_pend = clr.GRA + ds_pend + clr.RST
pretty_stat =\
"[%s on %s]\n %sAlive:%s %s%d/%d%s%s\n %sExecs:%s %d%sm\n %sSpeed:%s %s%.1f%sx/s%s\n %sPend:%s %d/%d%s\n" \
" %sCrashes:%s %s%d%s%s" % (_stat['afl_banner'], _stat['host'], lbl, rst, alc, _stat['fuzzer_pid'],
_stat['fuzzers'], rst, ds_alive, lbl, rst, _stat['execs_done'], ds_exec, lbl, rst, slc,
_stat['execs_per_sec'], ds_speed, rst, lbl, rst, _stat['pending_total'],
_stat['pending_favs'], ds_pend, lbl, rst, clc, _stat['unique_crashes'], rst, ds_crash)
else:
pretty_stat = "[%s #%s]\nAlive: %d/%d%s\nExecs: %d%sm\nSpeed: %.1f%sx/s\n" \
"Pend: %d/%d%s\nCrashes: %d%s" %\
(_stat['afl_banner'], _stat['host'], _stat['fuzzer_pid'], _stat['fuzzers'], ds_alive,
_stat['execs_done'], ds_exec, _stat['execs_per_sec'], ds_speed,
_stat['pending_total'], _stat['pending_favs'], ds_pend, _stat['unique_crashes'], ds_crash)
return pretty_stat
def dump_stats(config_settings, database):
for sync_dir in config_settings['fuzz_dirs']:
fuzzer_stats = load_stats(sync_dir, summary=False)
for fuzzer in fuzzer_stats:
# create different table for every afl instance
# table = 'fuzzer_stats_{}'.format(fuzzer['afl_banner'])
#
# django compatible: put everything into one table (according
# to django plots app model)
# Differentiate data based on afl_banner, so don't override
# it manually! afl-multicore will create a unique banner for
# every fuzzer!
table = 'aflutils_fuzzerstats'
database.init_database(table, db_table_spec)
if not database.dataset_exists(table, fuzzer, ['last_update', 'afl_banner']):
database.insert_dataset(table, fuzzer)
def fetch_stats(config_settings, twitter_inst):
stat_dict = dict()
for fuzzer in config_settings['fuzz_dirs']:
stats = load_stats(fuzzer)
if not stats:
continue
sum_stats = summarize_stats(stats)
try:
with open('.afl_stats.{}'.format(os.path.basename(fuzzer)), 'r') as f:
old_stats = json.load(f)
except FileNotFoundError:
old_stats = sum_stats.copy()
# initialize/update stat_dict
stat_dict[fuzzer] = (sum_stats, old_stats)
stat_change = diff_stats(sum_stats, old_stats)
with open('.afl_stats.{}'.format(os.path.basename(fuzzer)), 'w') as f:
json.dump(sum_stats, f)
print(prettify_stat(sum_stats, stat_change, True))
tweet = prettify_stat(sum_stats, stat_change, False)
l = len(tweet)
c = clr.LRD if l > 140 else clr.LGN
if twitter_inst:
print_ok("Tweeting status (%s%d" % (c, l) + clr.RST + " chars)...")
try:
twitter_inst.statuses.update(status=shorten_tweet(tweet))
except (twitter.TwitterHTTPError, URLError):
print_warn("Problem connecting to Twitter! Tweet not sent!")
except Exception as e:
print_err("Sending tweet failed (Reason: " + clr.GRA + "%s" % e.__cause__ + clr.RST + ")")
def main(argv):
parser = argparse.ArgumentParser(description="Post selected contents of fuzzer_stats to Twitter.",
usage="afl-stats [-h] [-c config] [-d database] [-t]\n")
parser.add_argument("-c", "--config", dest="config_file",
help="afl-stats config file (Default: afl-stats.conf)!", default="afl-stats.conf")
parser.add_argument("-d", "--database", dest="database_file",
help="Dump stats history into database.")
parser.add_argument('-t', '--twitter', dest='twitter', action='store_const', const=True,
help='Post stats to twitter (Default: off).', default=False)
parser.add_argument('-q', '--quiet', dest='quiet', action='store_const', const=True,
help='Suppress any output (Default: off).', default=False)
args = parser.parse_args(argv[1:])
if not args.quiet:
show_info()
if args.database_file:
db_file = os.path.abspath(os.path.expanduser(args.database_file))
else:
db_file = None
if db_file:
lite_db = con_sqlite.sqliteConnector(db_file, verbose=False)
else:
lite_db = None
config_settings = read_config(args.config_file)
if lite_db:
dump_stats(config_settings, lite_db)
lite_db.commit_close()
if args.twitter:
twitter_inst = twitter_init(config_settings)
else:
twitter_inst = None
fetch_stats(config_settings, twitter_inst)
if __name__ == "__main__":
main(sys.argv)
| apache-2.0 | 3,642,230,502,189,135,000 | 33.868182 | 129 | 0.537023 | false |
jmdejong/Asciifarm | asciifarm/server/entity.py | 1 | 4171 |
from . import serialize
from .eventtarget import EventTarget
class Entity:
""" Attempt to implement an entity component system
This is the base object
Components are given on construction.
Once a component is added to the object the attach method will be called on the component (if it has one).
The attach method is used to pass the entity and room events to the component.
When the entity is removed, all components will have their remove method called if they have one.
Remove methods are for cleanup, like unsubscribing from events.
"""
def __init__(self, sprite=' ', height=0, name=None, components=None, flags=None):
if components is None:
components = {}
if flags is None:
flags = set()
self.sprite = sprite # the name of the image to display for this entity
self.height = height # if multiple objects are on a square, the tallest one is drawn
self.name = name if name else sprite # human readable name/description
self.components = components
self.observable = EventTarget()
self.flags = set(flags)
self.ground = None
self.roomData = None
for component in self.components.values():
component.attach(self)
def construct(self, roomData, preserve=False, stamp=None):
self.roomData = roomData
if preserve:
roomData.preserveObject(self)
self._preserve()
if stamp is None:
stamp = roomData.getStamp()
self.trigger("roomjoin", roomData, stamp)
def hasComponent(self, name):
return name in self.components
def getComponent(self, name):
return self.components.get(name, None)
def place(self, ground):
if self.ground:
self.ground.removeObj(self)
self.ground = ground
ground.addObj(self)
def remove(self):
if self.ground:
self.ground.removeObj(self)
self.ground = None
if self.isPreserved():
self.roomData.removePreserved(self)
for component in self.components.values():
component.remove()
self.trigger("remove")
self.roomData = None
def addListener(self, event, callback, key=None):
self.observable.addListener(event, callback, key)
def removeListener(self, event, key):
self.observable.removeListener(event, key)
def trigger(self, event, *args, **kwargs):
self.observable.trigger(event, self, *args, **kwargs)
def getSprite(self):
return self.sprite
def getName(self):
return self.name
def getHeight(self):
return self.height
def inRoom(self):
return self.ground is not None
def getGround(self):
return self.ground
def getNearObjects(self):
return [obj for obj in self.ground.getObjs() if obj != self]
def getFlags(self):
return self.flags
def _preserve(self):
self.flags.add("preserve")
def isPreserved(self):
return "preserve" in self.flags
def toJSON(self):
return {
"sprite": self.sprite,
"name": self.name,
"height": self.height,
"flags": list(self.flags),
"components": {
name: serialize.serialize(comp)
for name, comp in self.components.items()
}
}
def serialize(self):
if "serialize" not in self.components:
return self.toJSON()
return self.components["serialize"].serialize()
@classmethod
def fromJSON(cls, data):
if data is None:
return None
return cls(
sprite = data["sprite"],
name = data["name"],
height = data["height"],
flags = data["flags"],
components = {
name: serialize.unserialize(comp)
for name, comp in data["components"].items()
}
)
def getRoomData(self):
return self.roomData
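# Illustrative sketch (not part of the original module): a minimal component that
# follows the contract described in the Entity docstring.  attach() receives the
# owning entity when the component is passed in via the `components` argument,
# and remove() is called for cleanup when the entity is removed.
class ExampleComponent:
    def attach(self, entity):
        # keep a reference and subscribe to an event on the owning entity
        self.entity = entity
        entity.addListener("roomjoin", self.onRoomJoin, key=self)
    def onRoomJoin(self, entity, roomData, stamp):
        # react to the entity joining a room; listeners always receive the entity first
        pass
    def remove(self):
        # cleanup: unsubscribe so the entity can be garbage collected
        self.entity.removeListener("roomjoin", self)
        self.entity = None
# Usage would then look roughly like:
#   Entity(sprite='x', name='example', components={"example": ExampleComponent()})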
| gpl-3.0 | 6,339,724,012,334,653,000 | 29.669118 | 110 | 0.585231 | false |
yashrastogi16/python_koans | python2/runner/sensei.py | 3 | 9891 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import re
import sys
import os
import glob
import helper
from mockable_test_result import MockableTestResult
from runner import path_to_enlightenment
from libs.colorama import init, Fore, Style
init() # init colorama
class Sensei(MockableTestResult):
def __init__(self, stream):
unittest.TestResult.__init__(self)
self.stream = stream
self.prevTestClassName = None
self.tests = path_to_enlightenment.koans()
self.pass_count = 0
self.lesson_pass_count = 0
self.all_lessons = None
def startTest(self, test):
MockableTestResult.startTest(self, test)
if helper.cls_name(test) != self.prevTestClassName:
self.prevTestClassName = helper.cls_name(test)
if not self.failures:
self.stream.writeln()
self.stream.writeln("{0}{1}Thinking {2}".format(
Fore.RESET, Style.NORMAL, helper.cls_name(test)))
if helper.cls_name(test) not in ['AboutAsserts', 'AboutExtraCredit']:
self.lesson_pass_count += 1
def addSuccess(self, test):
if self.passesCount():
MockableTestResult.addSuccess(self, test)
self.stream.writeln( \
" {0}{1}{2} has expanded your awareness.{3}{4}" \
.format(Fore.GREEN, Style.BRIGHT, test._testMethodName, \
Fore.RESET, Style.NORMAL))
self.pass_count += 1
def addError(self, test, err):
# Having 1 list for errors and 1 list for failures would mess with
# the error sequence
self.addFailure(test, err)
def passesCount(self):
return not (self.failures and helper.cls_name(self.failures[0][0]) !=
self.prevTestClassName)
def addFailure(self, test, err):
MockableTestResult.addFailure(self, test, err)
def sortFailures(self, testClassName):
table = list()
for test, err in self.failures:
if helper.cls_name(test) == testClassName:
m = re.search("(?<= line )\d+" ,err)
if m:
tup = (int(m.group(0)), test, err)
table.append(tup)
if table:
return sorted(table)
else:
return None
def firstFailure(self):
if not self.failures: return None
table = self.sortFailures(helper.cls_name(self.failures[0][0]))
if table:
return (table[0][1], table[0][2])
else:
return None
def learn(self):
self.errorReport()
self.stream.writeln("")
self.stream.writeln("")
self.stream.writeln(self.report_progress())
if self.failures:
self.stream.writeln(self.report_remaining())
self.stream.writeln("")
self.stream.writeln(self.say_something_zenlike())
if self.failures: sys.exit(-1)
self.stream.writeln(
"\n{0}**************************************************" \
.format(Fore.RESET))
self.stream.writeln("\n{0}That was the last one, well done!" \
.format(Fore.MAGENTA))
self.stream.writeln(
"\nIf you want more, take a look at about_extra_credit_task.py")
def errorReport(self):
problem = self.firstFailure()
if not problem: return
test, err = problem
self.stream.writeln(" {0}{1}{2} has damaged your "
"karma.".format(Fore.RED, Style.BRIGHT, test._testMethodName))
self.stream.writeln("\n{0}{1}You have not yet reached enlightenment ..." \
.format(Fore.RESET, Style.NORMAL))
self.stream.writeln("{0}{1}{2}".format(Fore.RED, \
Style.BRIGHT, self.scrapeAssertionError(err)))
self.stream.writeln("")
self.stream.writeln("{0}{1}Please meditate on the following code:" \
.format(Fore.RESET, Style.NORMAL))
self.stream.writeln("{0}{1}{2}{3}{4}".format(Fore.YELLOW, Style.BRIGHT, \
self.scrapeInterestingStackDump(err), Fore.RESET, Style.NORMAL))
def scrapeAssertionError(self, err):
if not err: return ""
error_text = ""
count = 0
for line in err.splitlines():
m = re.search("^[^^ ].*$",line)
if m and m.group(0):
count+=1
if count>1:
error_text += (" " + line.strip()).rstrip() + '\n'
return error_text.strip('\n')
def scrapeInterestingStackDump(self, err):
if not err:
return ""
lines = err.splitlines()
sep = '@@@@@SEP@@@@@'
stack_text = ""
for line in lines:
m = re.search("^ File .*$",line)
if m and m.group(0):
stack_text += '\n' + line
m = re.search("^ \w(\w)+.*$",line)
if m and m.group(0):
stack_text += sep + line
lines = stack_text.splitlines()
stack_text = ""
for line in lines:
m = re.search("^.*[/\\\\]koans[/\\\\].*$",line)
if m and m.group(0):
stack_text += line + '\n'
stack_text = stack_text.replace(sep, '\n').strip('\n')
stack_text = re.sub(r'(about_\w+.py)',
r"{0}\1{1}".format(Fore.BLUE, Fore.YELLOW), stack_text)
stack_text = re.sub(r'(line \d+)',
r"{0}\1{1}".format(Fore.BLUE, Fore.YELLOW), stack_text)
return stack_text
def report_progress(self):
return "You have completed {0} koans and " \
"{1} lessons.".format(
self.pass_count,
self.lesson_pass_count)
def report_remaining(self):
koans_remaining = self.total_koans() - self.pass_count
lessons_remaining = self.total_lessons() - self.lesson_pass_count
return "You are now {0} koans and {1} lessons away from " \
"reaching enlightenment.".format(
koans_remaining,
lessons_remaining)
# Hat's tip to Tim Peters for the zen statements from The 'Zen
# of Python' (http://www.python.org/dev/peps/pep-0020/)
#
# Also a hat's tip to Ara T. Howard for the zen statements from his
# metakoans Ruby Quiz (http://rubyquiz.com/quiz67.html) and
# Edgecase's later permutation in the Ruby Koans
def say_something_zenlike(self):
if self.failures:
turn = self.pass_count % 37
zenness = "";
if turn == 0:
zenness = "Beautiful is better than ugly."
elif turn == 1 or turn == 2:
zenness = "Explicit is better than implicit."
elif turn == 3 or turn == 4:
zenness = "Simple is better than complex."
elif turn == 5 or turn == 6:
zenness = "Complex is better than complicated."
elif turn == 7 or turn == 8:
zenness = "Flat is better than nested."
elif turn == 9 or turn == 10:
zenness = "Sparse is better than dense."
elif turn == 11 or turn == 12:
zenness = "Readability counts."
elif turn == 13 or turn == 14:
zenness = "Special cases aren't special enough to " \
"break the rules."
elif turn == 15 or turn == 16:
zenness = "Although practicality beats purity."
elif turn == 17 or turn == 18:
zenness = "Errors should never pass silently."
elif turn == 19 or turn == 20:
zenness = "Unless explicitly silenced."
elif turn == 21 or turn == 22:
zenness = "In the face of ambiguity, refuse the " \
"temptation to guess."
elif turn == 23 or turn == 24:
zenness = "There should be one-- and preferably only " \
"one --obvious way to do it."
elif turn == 25 or turn == 26:
zenness = "Although that way may not be obvious at " \
"first unless you're Dutch."
elif turn == 27 or turn == 28:
zenness = "Now is better than never."
elif turn == 29 or turn == 30:
zenness = "Although never is often better than right " \
"now."
elif turn == 31 or turn == 32:
zenness = "If the implementation is hard to explain, " \
"it's a bad idea."
elif turn == 33 or turn == 34:
zenness = "If the implementation is easy to explain, " \
"it may be a good idea."
else:
zenness = "Namespaces are one honking great idea -- " \
"let's do more of those!"
return "{0}{1}{2}{3}".format(Fore.CYAN, zenness, Fore.RESET, Style.NORMAL);
else:
return "{0}Nobody ever expects the Spanish Inquisition." \
.format(Fore.CYAN)
# Hopefully this will never ever happen!
return "The temple is collapsing! Run!!!"
def total_lessons(self):
all_lessons = self.filter_all_lessons()
if all_lessons:
return len(all_lessons)
else:
return 0
def total_koans(self):
return self.tests.countTestCases()
def filter_all_lessons(self):
cur_dir = os.path.split(os.path.realpath(__file__))[0]
if not self.all_lessons:
self.all_lessons = glob.glob('{0}/../koans/about*.py'.format(cur_dir))
self.all_lessons = filter(lambda filename:
"about_extra_credit" not in filename,
self.all_lessons)
return self.all_lessons
| mit | -8,106,285,442,590,577,000 | 36.044944 | 87 | 0.528662 | false |
quole/gensim | gensim/corpora/wikicorpus.py | 2 | 13419 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <[email protected]>
# Copyright (C) 2012 Lars Buitinck <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Construct a corpus from a Wikipedia (or other MediaWiki-based) database dump.
If you have the `pattern` package installed, this module will use a fancy
lemmatization to get a lemma of each token (instead of a plain alphabetic
tokenizer). The package is available at https://github.com/clips/pattern .
See scripts/process_wiki.py for a canned (example) script based on this
module.
"""
import bz2
import logging
import re
from xml.etree.cElementTree import iterparse # LXML isn't faster, so let's go with the built-in solution
import multiprocessing
from gensim import utils
# cannot import whole gensim.corpora, because that imports wikicorpus...
from gensim.corpora.dictionary import Dictionary
from gensim.corpora.textcorpus import TextCorpus
logger = logging.getLogger(__name__)
# ignore articles shorter than ARTICLE_MIN_WORDS characters (after full preprocessing)
ARTICLE_MIN_WORDS = 50
RE_P0 = re.compile('<!--.*?-->', re.DOTALL | re.UNICODE) # comments
RE_P1 = re.compile('<ref([> ].*?)(</ref>|/>)', re.DOTALL | re.UNICODE) # footnotes
RE_P2 = re.compile("(\n\[\[[a-z][a-z][\w-]*:[^:\]]+\]\])+$", re.UNICODE) # links to languages
RE_P3 = re.compile("{{([^}{]*)}}", re.DOTALL | re.UNICODE) # template
RE_P4 = re.compile("{{([^}]*)}}", re.DOTALL | re.UNICODE) # template
RE_P5 = re.compile('\[(\w+):\/\/(.*?)(( (.*?))|())\]', re.UNICODE) # remove URL, keep description
RE_P6 = re.compile("\[([^][]*)\|([^][]*)\]", re.DOTALL | re.UNICODE) # simplify links, keep description
RE_P7 = re.compile('\n\[\[[iI]mage(.*?)(\|.*?)*\|(.*?)\]\]', re.UNICODE) # keep description of images
RE_P8 = re.compile('\n\[\[[fF]ile(.*?)(\|.*?)*\|(.*?)\]\]', re.UNICODE) # keep description of files
RE_P9 = re.compile('<nowiki([> ].*?)(</nowiki>|/>)', re.DOTALL | re.UNICODE) # outside links
RE_P10 = re.compile('<math([> ].*?)(</math>|/>)', re.DOTALL | re.UNICODE) # math content
RE_P11 = re.compile('<(.*?)>', re.DOTALL | re.UNICODE) # all other tags
RE_P12 = re.compile('\n(({\|)|(\|-)|(\|}))(.*?)(?=\n)', re.UNICODE) # table formatting
RE_P13 = re.compile('\n(\||\!)(.*?\|)*([^|]*?)', re.UNICODE) # table cell formatting
RE_P14 = re.compile('\[\[Category:[^][]*\]\]', re.UNICODE) # categories
# Remove File and Image template
RE_P15 = re.compile('\[\[([fF]ile:|[iI]mage)[^]]*(\]\])', re.UNICODE)
# MediaWiki namespaces (https://www.mediawiki.org/wiki/Manual:Namespace) that
# ought to be ignored
IGNORED_NAMESPACES = ['Wikipedia', 'Category', 'File', 'Portal', 'Template',
'MediaWiki', 'User', 'Help', 'Book', 'Draft',
'WikiProject', 'Special', 'Talk']
def filter_wiki(raw):
"""
Filter out wiki mark-up from `raw`, leaving only text. `raw` is either unicode
or utf-8 encoded string.
"""
# parsing of the wiki markup is not perfect, but sufficient for our purposes
# contributions to improving this code are welcome :)
text = utils.to_unicode(raw, 'utf8', errors='ignore')
text = utils.decode_htmlentities(text) # '&nbsp;' --> '\xa0'
return remove_markup(text)
def remove_markup(text):
text = re.sub(RE_P2, "", text) # remove the last list (=languages)
# the wiki markup is recursive (markup inside markup etc)
# instead of writing a recursive grammar, here we deal with that by removing
# markup in a loop, starting with inner-most expressions and working outwards,
# for as long as something changes.
text = remove_template(text)
text = remove_file(text)
iters = 0
while True:
old, iters = text, iters + 1
text = re.sub(RE_P0, "", text) # remove comments
text = re.sub(RE_P1, '', text) # remove footnotes
text = re.sub(RE_P9, "", text) # remove outside links
text = re.sub(RE_P10, "", text) # remove math content
text = re.sub(RE_P11, "", text) # remove all remaining tags
text = re.sub(RE_P14, '', text) # remove categories
text = re.sub(RE_P5, '\\3', text) # remove urls, keep description
text = re.sub(RE_P6, '\\2', text) # simplify links, keep description only
# remove table markup
text = text.replace('||', '\n|') # each table cell on a separate line
text = re.sub(RE_P12, '\n', text) # remove formatting lines
text = re.sub(RE_P13, '\n\\3', text) # leave only cell content
# remove empty mark-up
text = text.replace('[]', '')
if old == text or iters > 2: # stop if nothing changed between two iterations or after a fixed number of iterations
break
# the following is needed to make the tokenizer see '[[socialist]]s' as a single word 'socialists'
# TODO is this really desirable?
text = text.replace('[', '').replace(']', '') # promote all remaining markup to plain text
return text
def remove_template(s):
"""Remove template wikimedia markup.
Return a copy of `s` with all the wikimedia markup template removed. See
http://meta.wikimedia.org/wiki/Help:Template for wikimedia templates
details.
    Note: Since templates can be nested, it is difficult to remove them using
    regular expressions.
"""
# Find the start and end position of each template by finding the opening
# '{{' and closing '}}'
n_open, n_close = 0, 0
starts, ends = [], []
in_template = False
prev_c = None
for i, c in enumerate(iter(s)):
if not in_template:
if c == '{' and c == prev_c:
starts.append(i - 1)
in_template = True
n_open = 1
if in_template:
if c == '{':
n_open += 1
elif c == '}':
n_close += 1
if n_open == n_close:
ends.append(i)
in_template = False
n_open, n_close = 0, 0
prev_c = c
# Remove all the templates
s = ''.join([s[end + 1:start] for start, end in
zip(starts + [None], [-1] + ends)])
return s
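# Illustrative note (not part of the original file): nested templates are removed in
# a single pass, e.g. (roughly)
#     remove_template("{{Infobox | name = {{PAGENAME}} }}Some text")  ->  "Some text"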
def remove_file(s):
"""Remove the 'File:' and 'Image:' markup, keeping the file caption.
Return a copy of `s` with all the 'File:' and 'Image:' markup replaced by
their corresponding captions. See http://www.mediawiki.org/wiki/Help:Images
for the markup details.
"""
# The regex RE_P15 match a File: or Image: markup
for match in re.finditer(RE_P15, s):
m = match.group(0)
caption = m[:-2].split('|')[-1]
s = s.replace(m, caption, 1)
return s
def tokenize(content):
"""
Tokenize a piece of text from wikipedia. The input string `content` is assumed
to be mark-up free (see `filter_wiki()`).
    Return a list of tokens as unicode strings. Ignore words shorter than 2 or longer
    than 15 characters (not bytes!).
"""
# TODO maybe ignore tokens with non-latin characters? (no chinese, arabic, russian etc.)
return [
utils.to_unicode(token) for token in utils.tokenize(content, lower=True, errors='ignore')
if 2 <= len(token) <= 15 and not token.startswith('_')
]
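# Illustrative note (not part of the original file): combined with the length filter
# above, single-character tokens are dropped, e.g. (roughly)
#     tokenize(u"Anarchism is a political philosophy")
#     -> [u'anarchism', u'is', u'political', u'philosophy']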
def get_namespace(tag):
"""Returns the namespace of tag."""
m = re.match("^{(.*?)}", tag)
namespace = m.group(1) if m else ""
if not namespace.startswith("http://www.mediawiki.org/xml/export-"):
raise ValueError("%s not recognized as MediaWiki dump namespace"
% namespace)
return namespace
_get_namespace = get_namespace
def extract_pages(f, filter_namespaces=False):
"""
Extract pages from a MediaWiki database dump = open file-like object `f`.
Return an iterable over (str, str, str) which generates (title, content, pageid) triplets.
"""
elems = (elem for _, elem in iterparse(f, events=("end",)))
# We can't rely on the namespace for database dumps, since it's changed
# it every time a small modification to the format is made. So, determine
# those from the first element we find, which will be part of the metadata,
# and construct element paths.
elem = next(elems)
namespace = get_namespace(elem.tag)
ns_mapping = {"ns": namespace}
page_tag = "{%(ns)s}page" % ns_mapping
text_path = "./{%(ns)s}revision/{%(ns)s}text" % ns_mapping
title_path = "./{%(ns)s}title" % ns_mapping
ns_path = "./{%(ns)s}ns" % ns_mapping
pageid_path = "./{%(ns)s}id" % ns_mapping
for elem in elems:
if elem.tag == page_tag:
title = elem.find(title_path).text
text = elem.find(text_path).text
if filter_namespaces:
ns = elem.find(ns_path).text
if ns not in filter_namespaces:
text = None
pageid = elem.find(pageid_path).text
yield title, text or "", pageid # empty page will yield None
# Prune the element tree, as per
# http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
# except that we don't need to prune backlinks from the parent
# because we don't use LXML.
# We do this only for <page>s, since we need to inspect the
# ./revision/text element. The pages comprise the bulk of the
# file, so in practice we prune away enough.
elem.clear()
_extract_pages = extract_pages # for backward compatibility
def process_article(args):
"""
Parse a wikipedia article, returning its content as a list of tokens
(utf8-encoded strings).
"""
text, lemmatize, title, pageid = args
text = filter_wiki(text)
if lemmatize:
result = utils.lemmatize(text)
else:
result = tokenize(text)
return result, title, pageid
class WikiCorpus(TextCorpus):
"""
Treat a wikipedia articles dump (\*articles.xml.bz2) as a (read-only) corpus.
The documents are extracted on-the-fly, so that the whole (massive) dump
can stay compressed on disk.
>>> wiki = WikiCorpus('enwiki-20100622-pages-articles.xml.bz2') # create word->word_id mapping, takes almost 8h
>>> MmCorpus.serialize('wiki_en_vocab200k.mm', wiki) # another 8h, creates a file in MatrixMarket format plus file with id->word
"""
def __init__(self, fname, processes=None, lemmatize=utils.has_pattern(), dictionary=None, filter_namespaces=('0',)):
"""
Initialize the corpus. Unless a dictionary is provided, this scans the
corpus once, to determine its vocabulary.
If `pattern` package is installed, use fancier shallow parsing to get
token lemmas. Otherwise, use simple regexp tokenization. You can override
this automatic logic by forcing the `lemmatize` parameter explicitly.
self.metadata if set to true will ensure that serialize will write out article titles to a pickle file.
"""
self.fname = fname
self.filter_namespaces = filter_namespaces
self.metadata = False
if processes is None:
processes = max(1, multiprocessing.cpu_count() - 1)
self.processes = processes
self.lemmatize = lemmatize
if dictionary is None:
self.dictionary = Dictionary(self.get_texts())
else:
self.dictionary = dictionary
def get_texts(self):
"""
Iterate over the dump, returning text version of each article as a list
of tokens.
Only articles of sufficient length are returned (short articles & redirects
etc are ignored).
Note that this iterates over the **texts**; if you want vectors, just use
the standard corpus interface instead of this function::
>>> for vec in wiki_corpus:
>>> print(vec)
"""
articles, articles_all = 0, 0
positions, positions_all = 0, 0
texts = ((text, self.lemmatize, title, pageid) for title, text, pageid in extract_pages(bz2.BZ2File(self.fname), self.filter_namespaces))
pool = multiprocessing.Pool(self.processes)
# process the corpus in smaller chunks of docs, because multiprocessing.Pool
# is dumb and would load the entire input into RAM at once...
for group in utils.chunkize(texts, chunksize=10 * self.processes, maxsize=1):
for tokens, title, pageid in pool.imap(process_article, group): # chunksize=10):
articles_all += 1
positions_all += len(tokens)
# article redirects and short stubs are pruned here
if len(tokens) < ARTICLE_MIN_WORDS or any(title.startswith(ignore + ':') for ignore in IGNORED_NAMESPACES):
continue
articles += 1
positions += len(tokens)
if self.metadata:
yield (tokens, (pageid, title))
else:
yield tokens
pool.terminate()
logger.info(
"finished iterating over Wikipedia corpus of %i documents with %i positions"
" (total %i articles, %i positions before pruning articles shorter than %i words)",
articles, positions, articles_all, positions_all, ARTICLE_MIN_WORDS)
self.length = articles # cache corpus length
# endclass WikiCorpus
| lgpl-2.1 | 2,339,635,076,073,072,000 | 40.162577 | 145 | 0.6148 | false |
vkroz/kafka | system_test/utils/system_test_utils.py | 88 | 23697 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
# ===================================
# system_test_utils.py
# ===================================
import copy
import difflib
import hashlib
import inspect
import json
import logging
import os
import re
import signal
import socket
import subprocess
import sys
import time
logger = logging.getLogger("namedLogger")
aLogger = logging.getLogger("anonymousLogger")
thisClassName = '(system_test_utils)'
d = {'name_of_class': thisClassName}
def get_current_unix_timestamp():
ts = time.time()
return "{0:.6f}".format(ts)
def get_local_hostname():
return socket.gethostname()
def sys_call(cmdStr):
output = ""
#logger.info("executing command [" + cmdStr + "]", extra=d)
p = subprocess.Popen(cmdStr, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in p.stdout.readlines():
output += line
return output
def remote_async_sys_call(host, cmd):
cmdStr = "ssh " + host + " \"" + cmd + "\""
logger.info("executing command [" + cmdStr + "]", extra=d)
async_sys_call(cmdStr)
def remote_sys_call(host, cmd):
cmdStr = "ssh " + host + " \"" + cmd + "\""
logger.info("executing command [" + cmdStr + "]", extra=d)
sys_call(cmdStr)
def get_dir_paths_with_prefix(fullPath, dirNamePrefix):
dirsList = []
for dirName in os.listdir(fullPath):
if not os.path.isfile(dirName) and dirName.startswith(dirNamePrefix):
dirsList.append(os.path.abspath(fullPath + "/" + dirName))
return dirsList
def get_testcase_prop_json_pathname(testcasePathName):
testcaseDirName = os.path.basename(testcasePathName)
return testcasePathName + "/" + testcaseDirName + "_properties.json"
def get_json_list_data(infile):
json_file_str = open(infile, "r").read()
json_data = json.loads(json_file_str)
data_list = []
for key,settings in json_data.items():
if type(settings) == list:
for setting in settings:
if type(setting) == dict:
kv_dict = {}
for k,v in setting.items():
kv_dict[k] = v
data_list.append(kv_dict)
return data_list
def get_dict_from_list_of_dicts(listOfDicts, lookupKey, lookupVal):
# {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '0', 'role': 'zookeeper', 'hostname': 'localhost'}
# {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '1', 'role': 'broker', 'hostname': 'localhost'}
#
# Usage:
#
    # 1. get_dict_from_list_of_dicts(self.clusterConfigsList, "entity_id", "0")
# returns:
# {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '0', 'role': 'zookeeper', 'hostname': 'localhost'}
#
    # 2. get_dict_from_list_of_dicts(self.clusterConfigsList, None, None)
# returns:
# {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '0', 'role': 'zookeeper', 'hostname': 'localhost'}
# {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '1', 'role': 'broker', 'hostname': 'localhost'}
retList = []
if ( lookupVal is None or lookupKey is None ):
for dict in listOfDicts:
            retList.append( dict )    # no lookup key/val given - keep every entity dict
else:
for dict in listOfDicts:
for k,v in dict.items():
if ( k == lookupKey and v == lookupVal ): # match with lookupKey and lookupVal
retList.append( dict )
return retList
def get_data_from_list_of_dicts(listOfDicts, lookupKey, lookupVal, fieldToRetrieve):
# Sample List of Dicts:
# {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '0', 'role': 'zookeeper', 'hostname': 'localhost'}
# {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '1', 'role': 'broker', 'hostname': 'localhost'}
#
# Usage:
# 1. get_data_from_list_of_dicts(self.clusterConfigsList, "entity_id", "0", "role")
# => returns ['zookeeper']
# 2. get_data_from_list_of_dicts(self.clusterConfigsList, None, None, "role")
# => returns ['zookeeper', 'broker']
retList = []
if ( lookupVal is None or lookupKey is None ):
for dict in listOfDicts:
for k,v in dict.items():
if ( k == fieldToRetrieve ): # match with fieldToRetrieve ONLY
try:
retList.append( dict[fieldToRetrieve] )
except:
logger.debug("field not found: " + fieldToRetrieve, extra=d)
else:
for dict in listOfDicts:
for k,v in dict.items():
if ( k == lookupKey and v == lookupVal ): # match with lookupKey and lookupVal
try:
retList.append( dict[fieldToRetrieve] )
except:
logger.debug("field not found: " + fieldToRetrieve, extra=d)
return retList
def get_data_by_lookup_keyval(listOfDict, lookupKey, lookupVal, fieldToRetrieve):
returnValue = ""
returnValuesList = get_data_from_list_of_dicts(listOfDict, lookupKey, lookupVal, fieldToRetrieve)
if len(returnValuesList) > 0:
returnValue = returnValuesList[0]
return returnValue
def get_json_dict_data(infile):
json_file_str = open(infile, "r").read()
json_data = json.loads(json_file_str)
data_dict = {}
for key,val in json_data.items():
if ( type(val) != list ):
data_dict[key] = val
return data_dict
def get_remote_child_processes(hostname, pid):
pidStack = []
cmdList = ['''ssh ''' + hostname,
''''pid=''' + pid + '''; prev_pid=""; echo $pid;''',
'''while [[ "x$pid" != "x" ]];''',
'''do prev_pid=$pid;''',
''' for child in $(ps -o pid,ppid ax | awk "{ if ( \$2 == $pid ) { print \$1 }}");''',
''' do echo $child; pid=$child;''',
''' done;''',
''' if [ $prev_pid == $pid ]; then''',
''' break;''',
''' fi;''',
'''done' 2> /dev/null''']
cmdStr = " ".join(cmdList)
logger.debug("executing command [" + cmdStr, extra=d)
subproc = subprocess.Popen(cmdStr, shell=True, stdout=subprocess.PIPE)
for line in subproc.stdout.readlines():
procId = line.rstrip('\n')
pidStack.append(procId)
return pidStack
def get_child_processes(pid):
pidStack = []
currentPid = pid
parentPid = ""
pidStack.append(pid)
while ( len(currentPid) > 0 ):
psCommand = subprocess.Popen("ps -o pid --ppid %s --noheaders" % currentPid, shell=True, stdout=subprocess.PIPE)
psOutput = psCommand.stdout.read()
outputLine = psOutput.rstrip('\n')
childPid = outputLine.lstrip()
if ( len(childPid) > 0 ):
pidStack.append(childPid)
currentPid = childPid
else:
break
return pidStack
def sigterm_remote_process(hostname, pidStack):
while ( len(pidStack) > 0 ):
pid = pidStack.pop()
cmdStr = "ssh " + hostname + " 'kill -15 " + pid + "'"
try:
logger.debug("executing command [" + cmdStr + "]", extra=d)
sys_call_return_subproc(cmdStr)
except:
print "WARN - pid:",pid,"not found"
raise
def sigkill_remote_process(hostname, pidStack):
while ( len(pidStack) > 0 ):
pid = pidStack.pop()
cmdStr = "ssh " + hostname + " 'kill -9 " + pid + "'"
try:
logger.debug("executing command [" + cmdStr + "]", extra=d)
sys_call_return_subproc(cmdStr)
except:
print "WARN - pid:",pid,"not found"
raise
def simulate_garbage_collection_pause_in_remote_process(hostname, pidStack, pauseTimeInSeconds):
pausedPidStack = []
# pause the processes
while len(pidStack) > 0:
pid = pidStack.pop()
pausedPidStack.append(pid)
cmdStr = "ssh " + hostname + " 'kill -SIGSTOP " + pid + "'"
try:
logger.debug("executing command [" + cmdStr + "]", extra=d)
sys_call_return_subproc(cmdStr)
except:
print "WARN - pid:",pid,"not found"
raise
time.sleep(int(pauseTimeInSeconds))
# resume execution of the processes
while len(pausedPidStack) > 0:
pid = pausedPidStack.pop()
cmdStr = "ssh " + hostname + " 'kill -SIGCONT " + pid + "'"
try:
logger.debug("executing command [" + cmdStr + "]", extra=d)
sys_call_return_subproc(cmdStr)
except:
print "WARN - pid:",pid,"not found"
raise
def terminate_process(pidStack):
while ( len(pidStack) > 0 ):
pid = pidStack.pop()
try:
os.kill(int(pid), signal.SIGTERM)
except:
print "WARN - pid:",pid,"not found"
raise
def convert_keyval_to_cmd_args(configFilePathname):
cmdArg = ""
inlines = open(configFilePathname, "r").readlines()
for inline in inlines:
line = inline.rstrip()
tokens = line.split('=', 1)
if (len(tokens) == 2):
cmdArg = cmdArg + " --" + tokens[0] + " " + tokens[1]
elif (len(tokens) == 1):
cmdArg = cmdArg + " --" + tokens[0]
else:
print "ERROR: unexpected arguments list", line
return cmdArg
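# Illustrative note (not part of the original file): a properties file containing
#     broker.id=1
#     compression
# would (roughly) be converted by convert_keyval_to_cmd_args() into the string
#     " --broker.id 1 --compression"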
def async_sys_call(cmd_str):
subprocess.Popen(cmd_str, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def sys_call_return_subproc(cmd_str):
p = subprocess.Popen(cmd_str, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p
def remote_host_file_exists(hostname, pathname):
cmdStr = "ssh " + hostname + " 'ls " + pathname + "'"
logger.debug("executing command: [" + cmdStr + "]", extra=d)
subproc = sys_call_return_subproc(cmdStr)
for line in subproc.stdout.readlines():
if "No such file or directory" in line:
return False
return True
def remote_host_directory_exists(hostname, path):
cmdStr = "ssh " + hostname + " 'ls -d " + path + "'"
logger.debug("executing command: [" + cmdStr + "]", extra=d)
subproc = sys_call_return_subproc(cmdStr)
for line in subproc.stdout.readlines():
if "No such file or directory" in line:
return False
return True
def remote_host_processes_stopped(hostname):
cmdStr = "ssh " + hostname + \
" \"ps auxw | grep -v grep | grep -v Bootstrap | grep -i 'java\|run\-\|producer\|consumer\|jmxtool\|kafka' | wc -l\" 2> /dev/null"
logger.info("executing command: [" + cmdStr + "]", extra=d)
subproc = sys_call_return_subproc(cmdStr)
for line in subproc.stdout.readlines():
line = line.rstrip('\n')
logger.info("no. of running processes found : [" + line + "]", extra=d)
if line == '0':
return True
return False
def setup_remote_hosts(systemTestEnv):
# sanity check on remote hosts to make sure:
# - all directories (eg. java_home) specified in cluster_config.json exists in all hosts
# - no conflicting running processes in remote hosts
aLogger.info("=================================================")
aLogger.info("setting up remote hosts ...")
aLogger.info("=================================================")
clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList
localKafkaHome = os.path.abspath(systemTestEnv.SYSTEM_TEST_BASE_DIR + "/..")
# when configuring "default" java_home, use JAVA_HOME environment variable, if exists
# otherwise, use the directory with the java binary
localJavaHome = os.environ.get('JAVA_HOME')
if localJavaHome is not None:
localJavaBin = localJavaHome + '/bin/java'
else:
subproc = sys_call_return_subproc("which java")
for line in subproc.stdout.readlines():
if line.startswith("which: no "):
logger.error("No Java binary found in local host", extra=d)
return False
else:
line = line.rstrip('\n')
localJavaBin = line
matchObj = re.match("(.*)\/bin\/java$", line)
localJavaHome = matchObj.group(1)
listIndex = -1
for clusterEntityConfigDict in clusterEntityConfigDictList:
listIndex += 1
hostname = clusterEntityConfigDict["hostname"]
kafkaHome = clusterEntityConfigDict["kafka_home"]
javaHome = clusterEntityConfigDict["java_home"]
if hostname == "localhost" and javaHome == "default":
clusterEntityConfigDictList[listIndex]["java_home"] = localJavaHome
if hostname == "localhost" and kafkaHome == "default":
clusterEntityConfigDictList[listIndex]["kafka_home"] = localKafkaHome
if hostname == "localhost" and kafkaHome == "system_test/migration_tool_testsuite/0.7":
clusterEntityConfigDictList[listIndex]["kafka_home"] = localKafkaHome + "/system_test/migration_tool_testsuite/0.7"
kafkaHome = clusterEntityConfigDict["kafka_home"]
javaHome = clusterEntityConfigDict["java_home"]
logger.debug("checking java binary [" + localJavaBin + "] in host [" + hostname + "]", extra=d)
if not remote_host_directory_exists(hostname, javaHome):
logger.error("Directory not found: [" + javaHome + "] in host [" + hostname + "]", extra=d)
return False
logger.debug("checking directory [" + kafkaHome + "] in host [" + hostname + "]", extra=d)
if not remote_host_directory_exists(hostname, kafkaHome):
logger.info("Directory not found: [" + kafkaHome + "] in host [" + hostname + "]", extra=d)
if hostname == "localhost":
return False
else:
localKafkaSourcePath = systemTestEnv.SYSTEM_TEST_BASE_DIR + "/.."
logger.debug("copying local copy of [" + localKafkaSourcePath + "] to " + hostname + ":" + kafkaHome, extra=d)
copy_source_to_remote_hosts(hostname, localKafkaSourcePath, kafkaHome)
return True
def copy_source_to_remote_hosts(hostname, sourceDir, destDir):
cmdStr = "rsync -avz --delete-before " + sourceDir + "/ " + hostname + ":" + destDir
logger.info("executing command [" + cmdStr + "]", extra=d)
subproc = sys_call_return_subproc(cmdStr)
for line in subproc.stdout.readlines():
dummyVar = 1
def remove_kafka_home_dir_at_remote_hosts(hostname, kafkaHome):
if remote_host_file_exists(hostname, kafkaHome + "/bin/kafka-run-class.sh"):
cmdStr = "ssh " + hostname + " 'chmod -R 777 " + kafkaHome + "'"
logger.info("executing command [" + cmdStr + "]", extra=d)
sys_call(cmdStr)
cmdStr = "ssh " + hostname + " 'rm -rf " + kafkaHome + "'"
logger.info("executing command [" + cmdStr + "]", extra=d)
#sys_call(cmdStr)
else:
logger.warn("possible destructive command [" + cmdStr + "]", extra=d)
logger.warn("check config file: system_test/cluster_config.properties", extra=d)
logger.warn("aborting test...", extra=d)
sys.exit(1)
def get_md5_for_file(filePathName, blockSize=8192):
md5 = hashlib.md5()
f = open(filePathName, 'rb')
while True:
data = f.read(blockSize)
if not data:
break
md5.update(data)
return md5.digest()
def load_cluster_config(clusterConfigPathName, clusterEntityConfigDictList):
# empty the list
clusterEntityConfigDictList[:] = []
# retrieve each entity's data from cluster config json file
# as "dict" and enter them into a "list"
jsonFileContent = open(clusterConfigPathName, "r").read()
jsonData = json.loads(jsonFileContent)
for key, cfgList in jsonData.items():
if key == "cluster_config":
for cfg in cfgList:
clusterEntityConfigDictList.append(cfg)
def setup_remote_hosts_with_testcase_level_cluster_config(systemTestEnv, testCasePathName):
# =======================================================================
# starting a new testcase, check for local cluster_config.json
# =======================================================================
# 1. if there is a xxxx_testsuite/testcase_xxxx/cluster_config.json
# => load it into systemTestEnv.clusterEntityConfigDictList
# 2. if there is NO testcase_xxxx/cluster_config.json but has a xxxx_testsuite/cluster_config.json
# => retore systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite
# 3. if there is NO testcase_xxxx/cluster_config.json NOR xxxx_testsuite/cluster_config.json
# => restore system_test/cluster_config.json
testCaseLevelClusterConfigPathName = testCasePathName + "/cluster_config.json"
if os.path.isfile(testCaseLevelClusterConfigPathName):
# if there is a cluster_config.json in this directory, load it and use it for this testsuite
logger.info("found a new cluster_config : " + testCaseLevelClusterConfigPathName, extra=d)
# empty the current cluster config list
systemTestEnv.clusterEntityConfigDictList[:] = []
# load the cluster config for this testcase level
load_cluster_config(testCaseLevelClusterConfigPathName, systemTestEnv.clusterEntityConfigDictList)
# back up this testcase level cluster config
systemTestEnv.clusterEntityConfigDictListLastFoundInTestCase = copy.deepcopy(systemTestEnv.clusterEntityConfigDictList)
elif len(systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite) > 0:
# if there is NO testcase_xxxx/cluster_config.json, but has a xxxx_testsuite/cluster_config.json
# => restore the config in xxxx_testsuite/cluster_config.json
# empty the current cluster config list
systemTestEnv.clusterEntityConfigDictList[:] = []
# restore the system_test/cluster_config.json
systemTestEnv.clusterEntityConfigDictList = copy.deepcopy(systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite)
else:
# if there is NONE, restore the config in system_test/cluster_config.json
# empty the current cluster config list
systemTestEnv.clusterEntityConfigDictList[:] = []
# restore the system_test/cluster_config.json
systemTestEnv.clusterEntityConfigDictList = copy.deepcopy(systemTestEnv.clusterEntityConfigDictListInSystemTestLevel)
# set up remote hosts
if not setup_remote_hosts(systemTestEnv):
logger.error("Remote hosts sanity check failed. Aborting test ...", extra=d)
print
sys.exit(1)
print
def setup_remote_hosts_with_testsuite_level_cluster_config(systemTestEnv, testModulePathName):
# =======================================================================
# starting a new testsuite, check for local cluster_config.json:
# =======================================================================
    # 1. if there is a xxxx_testsuite/cluster_config.json
# => load it into systemTestEnv.clusterEntityConfigDictList
    # 2. if there is NO xxxx_testsuite/cluster_config.json
# => restore system_test/cluster_config.json
testSuiteLevelClusterConfigPathName = testModulePathName + "/cluster_config.json"
if os.path.isfile(testSuiteLevelClusterConfigPathName):
# if there is a cluster_config.json in this directory, load it and use it for this testsuite
logger.info("found a new cluster_config : " + testSuiteLevelClusterConfigPathName, extra=d)
# empty the current cluster config list
systemTestEnv.clusterEntityConfigDictList[:] = []
# load the cluster config for this testsuite level
load_cluster_config(testSuiteLevelClusterConfigPathName, systemTestEnv.clusterEntityConfigDictList)
# back up this testsuite level cluster config
systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite = copy.deepcopy(systemTestEnv.clusterEntityConfigDictList)
else:
# if there is NONE, restore the config in system_test/cluster_config.json
# empty the last testsuite level cluster config list
systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite[:] = []
# empty the current cluster config list
systemTestEnv.clusterEntityConfigDictList[:] = []
# restore the system_test/cluster_config.json
systemTestEnv.clusterEntityConfigDictList = copy.deepcopy(systemTestEnv.clusterEntityConfigDictListInSystemTestLevel)
# set up remote hosts
if not setup_remote_hosts(systemTestEnv):
logger.error("Remote hosts sanity check failed. Aborting test ...", extra=d)
print
sys.exit(1)
print
# =================================================
# lists_diff_count
# - find the no. of different items in both lists
# - both lists need not be sorted
# - input lists won't be changed
# =================================================
def lists_diff_count(a, b):
c = list(b)
d = []
for item in a:
try:
c.remove(item)
except:
d.append(item)
if len(d) > 0:
print "#### Mismatch MessageID"
print d
return len(c) + len(d)
# =================================================
# subtract_list
# - subtract items in listToSubtract from mainList
# and return the resulting list
# - both lists need not be sorted
# - input lists won't be changed
# =================================================
def subtract_list(mainList, listToSubtract):
remainingList = list(mainList)
for item in listToSubtract:
try:
remainingList.remove(item)
except:
pass
return remainingList
# =================================================
# diff_lists
# - find the diff of 2 lists and return the
# total no. of mismatch from both lists
# - diff of both lists includes:
# - no. of items mismatch
# - ordering of the items
#
# sample lists:
# a = ['8','4','3','2','1']
# b = ['8','3','4','2','1']
#
# difflib will return the following:
# 8
# + 3
# 4
# - 3
# 2
# 1
#
# diff_lists(a,b) returns 2 and prints the following:
# #### only in seq 2 : + 3
# #### only in seq 1 : - 3
# =================================================
def diff_lists(a, b):
mismatchCount = 0
d = difflib.Differ()
diff = d.compare(a,b)
for item in diff:
result = item[0:1].strip()
if len(result) > 0:
mismatchCount += 1
if '-' in result:
logger.debug("#### only in seq 1 : " + item, extra=d)
elif '+' in result:
logger.debug("#### only in seq 2 : " + item, extra=d)
return mismatchCount
| apache-2.0 | -126,760,310,362,561,020 | 36.142633 | 143 | 0.601722 | false |
dudymas/python-openstacksdk | openstack/tests/unit/block_store/test_block_store_service.py | 4 | 1110 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from openstack.block_store import block_store_service
class TestBlockStoreService(testtools.TestCase):
def test_service(self):
sot = block_store_service.BlockStoreService()
self.assertEqual("volume", sot.service_type)
self.assertEqual("public", sot.interface)
self.assertIsNone(sot.region)
self.assertIsNone(sot.service_name)
self.assertEqual(1, len(sot.valid_versions))
self.assertEqual("v2", sot.valid_versions[0].module)
self.assertEqual("v2", sot.valid_versions[0].path)
| apache-2.0 | -6,463,571,559,528,288,000 | 38.642857 | 75 | 0.730631 | false |
Serg09/socorro | socorro/external/postgresql/skiplist.py | 8 | 3829 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import logging
import psycopg2
from socorro.external import DatabaseError, MissingArgumentError
from socorro.external.postgresql.base import PostgreSQLBase
from socorro.lib import external_common
logger = logging.getLogger("webapi")
class SkipList(PostgreSQLBase):
filters = [
("category", None, ["str"]),
("rule", None, ["str"]),
]
def get(self, **kwargs):
params = external_common.parse_arguments(self.filters, kwargs)
sql_params = []
sql = """
/* socorro.external.postgresql.skiplist.SkipList.get */
SELECT category,
rule
FROM skiplist
WHERE 1=1
"""
        if params.category:
            sql += ' AND category=%s'
            sql_params.append(params.category)
        if params.rule:
            sql += ' AND rule=%s'
            sql_params.append(params.rule)
# Use `UPPER()` to make the sort case insensitive
# which makes it more user-friendly on the UI later
sql += """
ORDER BY UPPER(category), UPPER(rule)
"""
error_message = "Failed to retrieve skip list data from PostgreSQL"
sql_results = self.query(sql, sql_params, error_message=error_message)
results = [dict(zip(("category", "rule"), x)) for x in sql_results]
return {'hits': results, 'total': len(results)}
def post(self, **kwargs):
params = external_common.parse_arguments(self.filters, kwargs)
if not params.category:
raise MissingArgumentError('category')
if not params.rule:
raise MissingArgumentError('rule')
sql = """
/* socorro.external.postgresql.skiplist.SkipList.post */
INSERT INTO skiplist (category, rule)
VALUES (%s, %s);
"""
sql_params = [params.category, params.rule]
connection = self.database.connection()
try:
with connection.cursor() as cur:
cur.execute(sql, sql_params)
connection.commit()
except psycopg2.Error:
connection.rollback()
error_message = "Failed updating skip list in PostgreSQL"
logger.error(error_message)
raise DatabaseError(error_message)
finally:
connection.close()
return True
def delete(self, **kwargs):
params = external_common.parse_arguments(self.filters, kwargs)
if not params.category:
raise MissingArgumentError('category')
if not params.rule:
raise MissingArgumentError('rule')
sql_params = [params.category, params.rule]
count_sql = """
/* socorro.external.postgresql.skiplist.SkipList.delete */
SELECT COUNT(*) FROM skiplist
WHERE category=%s AND rule=%s
"""
sql = """
/* socorro.external.postgresql.skiplist.SkipList.delete */
DELETE FROM skiplist
WHERE category=%s AND rule=%s
"""
connection = self.database.connection()
try:
cur = connection.cursor()
count = self.count(count_sql, sql_params, connection=connection)
if not count:
return False
cur.execute(sql, sql_params)
connection.commit()
except psycopg2.Error:
connection.rollback()
error_message = "Failed delete skip list in PostgreSQL"
logger.error(error_message)
raise DatabaseError(error_message)
finally:
connection.close()
return True
| mpl-2.0 | -3,424,378,661,777,563,600 | 32.295652 | 78 | 0.583181 | false |
fuhongliang/odoo | addons/crm/wizard/crm_partner_binding.py | 257 | 4570 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class crm_partner_binding(osv.osv_memory):
"""
Handle the partner binding or generation in any CRM wizard that requires
such feature, like the lead2opportunity wizard, or the
phonecall2opportunity wizard. Try to find a matching partner from the
CRM model's information (name, email, phone number, etc) or create a new
one on the fly.
Use it like a mixin with the wizard of your choice.
"""
_name = 'crm.partner.binding'
_description = 'Handle partner binding or generation in CRM wizards.'
_columns = {
'action': fields.selection([
('exist', 'Link to an existing customer'),
('create', 'Create a new customer'),
('nothing', 'Do not link to a customer')
], 'Related Customer', required=True),
'partner_id': fields.many2one('res.partner', 'Customer'),
}
def _find_matching_partner(self, cr, uid, context=None):
"""
Try to find a matching partner regarding the active model data, like
the customer's name, email, phone number, etc.
:return int partner_id if any, False otherwise
"""
if context is None:
context = {}
partner_id = False
partner_obj = self.pool.get('res.partner')
        # The active model has to be a lead or a phonecall
        active_model = None
        if (context.get('active_model') == 'crm.lead') and context.get('active_id'):
active_model = self.pool.get('crm.lead').browse(cr, uid, context.get('active_id'), context=context)
elif (context.get('active_model') == 'crm.phonecall') and context.get('active_id'):
active_model = self.pool.get('crm.phonecall').browse(cr, uid, context.get('active_id'), context=context)
# Find the best matching partner for the active model
if (active_model):
partner_obj = self.pool.get('res.partner')
# A partner is set already
if active_model.partner_id:
partner_id = active_model.partner_id.id
# Search through the existing partners based on the lead's email
elif active_model.email_from:
partner_ids = partner_obj.search(cr, uid, [('email', '=', active_model.email_from)], context=context)
if partner_ids:
partner_id = partner_ids[0]
# Search through the existing partners based on the lead's partner or contact name
elif active_model.partner_name:
partner_ids = partner_obj.search(cr, uid, [('name', 'ilike', '%'+active_model.partner_name+'%')], context=context)
if partner_ids:
partner_id = partner_ids[0]
elif active_model.contact_name:
partner_ids = partner_obj.search(cr, uid, [
('name', 'ilike', '%'+active_model.contact_name+'%')], context=context)
if partner_ids:
partner_id = partner_ids[0]
return partner_id
def default_get(self, cr, uid, fields, context=None):
res = super(crm_partner_binding, self).default_get(cr, uid, fields, context=context)
partner_id = self._find_matching_partner(cr, uid, context=context)
if 'action' in fields and not res.get('action'):
res['action'] = partner_id and 'exist' or 'create'
if 'partner_id' in fields:
res['partner_id'] = partner_id
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,605,517,488,752,603,000 | 45.161616 | 130 | 0.601313 | false |
nevir/plexability | extern/depot_tools/third_party/logilab/common/proc.py | 117 | 9352 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""module providing:
* process information (linux specific: rely on /proc)
* a class for resource control (memory / time / cpu time)
This module doesn't work on windows platforms (only tested on linux)
:organization: Logilab
"""
__docformat__ = "restructuredtext en"
import os
import stat
from resource import getrlimit, setrlimit, RLIMIT_CPU, RLIMIT_AS
from signal import signal, SIGXCPU, SIGKILL, SIGUSR2, SIGUSR1
from threading import Timer, currentThread, Thread, Event
from time import time
from logilab.common.tree import Node
class NoSuchProcess(Exception): pass
def proc_exists(pid):
"""check the a pid is registered in /proc
raise NoSuchProcess exception if not
"""
if not os.path.exists('/proc/%s' % pid):
raise NoSuchProcess()
PPID = 3
UTIME = 13
STIME = 14
CUTIME = 15
CSTIME = 16
VSIZE = 22
class ProcInfo(Node):
"""provide access to process information found in /proc"""
def __init__(self, pid):
self.pid = int(pid)
Node.__init__(self, self.pid)
proc_exists(self.pid)
self.file = '/proc/%s/stat' % self.pid
self.ppid = int(self.status()[PPID])
def memory_usage(self):
"""return the memory usage of the process in Ko"""
try :
return int(self.status()[VSIZE])
except IOError:
return 0
def lineage_memory_usage(self):
return self.memory_usage() + sum([child.lineage_memory_usage()
for child in self.children])
def time(self, children=0):
"""return the number of jiffies that this process has been scheduled
in user and kernel mode"""
status = self.status()
time = int(status[UTIME]) + int(status[STIME])
if children:
time += int(status[CUTIME]) + int(status[CSTIME])
return time
def status(self):
"""return the list of fields found in /proc/<pid>/stat"""
return open(self.file).read().split()
def name(self):
"""return the process name found in /proc/<pid>/stat
"""
return self.status()[1].strip('()')
def age(self):
"""return the age of the process
"""
return os.stat(self.file)[stat.ST_MTIME]
class ProcInfoLoader:
"""manage process information"""
def __init__(self):
self._loaded = {}
def list_pids(self):
"""return a list of existent process ids"""
for subdir in os.listdir('/proc'):
if subdir.isdigit():
yield int(subdir)
def load(self, pid):
"""get a ProcInfo object for a given pid"""
pid = int(pid)
try:
return self._loaded[pid]
except KeyError:
procinfo = ProcInfo(pid)
procinfo.manager = self
self._loaded[pid] = procinfo
return procinfo
def load_all(self):
"""load all processes information"""
for pid in self.list_pids():
try:
procinfo = self.load(pid)
if procinfo.parent is None and procinfo.ppid:
pprocinfo = self.load(procinfo.ppid)
pprocinfo.append(procinfo)
except NoSuchProcess:
pass
try:
class ResourceError(BaseException):
"""Error raise when resource limit is reached"""
limit = "Unknown Resource Limit"
except NameError:
class ResourceError(Exception):
"""Error raise when resource limit is reached"""
limit = "Unknown Resource Limit"
class XCPUError(ResourceError):
"""Error raised when CPU Time limit is reached"""
limit = "CPU Time"
class LineageMemoryError(ResourceError):
"""Error raised when the total amount of memory used by a process and
it's child is reached"""
limit = "Lineage total Memory"
class TimeoutError(ResourceError):
"""Error raised when the process is running for to much time"""
limit = "Real Time"
# Can't rely on ResourceError subclasses alone because the built-in MemoryError (a StandardError) may also be raised
RESOURCE_LIMIT_EXCEPTION = (ResourceError, MemoryError)
class MemorySentinel(Thread):
"""A class checking a process don't use too much memory in a separated
daemonic thread
"""
def __init__(self, interval, memory_limit, gpid=os.getpid()):
Thread.__init__(self, target=self._run, name="Test.Sentinel")
self.memory_limit = memory_limit
self._stop = Event()
self.interval = interval
self.setDaemon(True)
self.gpid = gpid
def stop(self):
"""stop ap"""
self._stop.set()
def _run(self):
pil = ProcInfoLoader()
while not self._stop.isSet():
if self.memory_limit <= pil.load(self.gpid).lineage_memory_usage():
os.killpg(self.gpid, SIGUSR1)
self._stop.wait(self.interval)
class ResourceController:
def __init__(self, max_cpu_time=None, max_time=None, max_memory=None,
max_reprieve=60):
if SIGXCPU == -1:
raise RuntimeError("Unsupported platform")
self.max_time = max_time
self.max_memory = max_memory
self.max_cpu_time = max_cpu_time
self._reprieve = max_reprieve
self._timer = None
self._msentinel = None
self._old_max_memory = None
self._old_usr1_hdlr = None
self._old_max_cpu_time = None
self._old_usr2_hdlr = None
self._old_sigxcpu_hdlr = None
self._limit_set = 0
self._abort_try = 0
self._start_time = None
self._elapse_time = 0
def _hangle_sig_timeout(self, sig, frame):
raise TimeoutError()
def _hangle_sig_memory(self, sig, frame):
if self._abort_try < self._reprieve:
self._abort_try += 1
raise LineageMemoryError("Memory limit reached")
else:
os.killpg(os.getpid(), SIGKILL)
def _handle_sigxcpu(self, sig, frame):
if self._abort_try < self._reprieve:
self._abort_try += 1
raise XCPUError("Soft CPU time limit reached")
else:
os.killpg(os.getpid(), SIGKILL)
def _time_out(self):
if self._abort_try < self._reprieve:
self._abort_try += 1
os.killpg(os.getpid(), SIGUSR2)
if self._limit_set > 0:
self._timer = Timer(1, self._time_out)
self._timer.start()
else:
os.killpg(os.getpid(), SIGKILL)
def setup_limit(self):
"""set up the process limit"""
assert currentThread().getName() == 'MainThread'
os.setpgrp()
if self._limit_set <= 0:
if self.max_time is not None:
self._old_usr2_hdlr = signal(SIGUSR2, self._hangle_sig_timeout)
self._timer = Timer(max(1, int(self.max_time) - self._elapse_time),
self._time_out)
self._start_time = int(time())
self._timer.start()
if self.max_cpu_time is not None:
self._old_max_cpu_time = getrlimit(RLIMIT_CPU)
cpu_limit = (int(self.max_cpu_time), self._old_max_cpu_time[1])
self._old_sigxcpu_hdlr = signal(SIGXCPU, self._handle_sigxcpu)
setrlimit(RLIMIT_CPU, cpu_limit)
if self.max_memory is not None:
self._msentinel = MemorySentinel(1, int(self.max_memory) )
self._old_max_memory = getrlimit(RLIMIT_AS)
self._old_usr1_hdlr = signal(SIGUSR1, self._hangle_sig_memory)
as_limit = (int(self.max_memory), self._old_max_memory[1])
setrlimit(RLIMIT_AS, as_limit)
self._msentinel.start()
self._limit_set += 1
def clean_limit(self):
"""reinstall the old process limit"""
if self._limit_set > 0:
if self.max_time is not None:
self._timer.cancel()
self._elapse_time += int(time())-self._start_time
self._timer = None
signal(SIGUSR2, self._old_usr2_hdlr)
if self.max_cpu_time is not None:
setrlimit(RLIMIT_CPU, self._old_max_cpu_time)
signal(SIGXCPU, self._old_sigxcpu_hdlr)
if self.max_memory is not None:
self._msentinel.stop()
self._msentinel = None
setrlimit(RLIMIT_AS, self._old_max_memory)
signal(SIGUSR1, self._old_usr1_hdlr)
self._limit_set -= 1
| gpl-2.0 | -6,112,673,848,368,580,000 | 32.761733 | 83 | 0.591745 | false |
stefansommer/jetflows | code/run_two_jets.py | 1 | 1970 | #!/usr/bin/python
#
# This file is part of jetflows.
#
# Copyright (C) 2014, Henry O. Jacobs ([email protected]), Stefan Sommer ([email protected])
# https://github.com/nefan/jetflows.git
#
# jetflows is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# jetflows is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with jetflows. If not, see <http://www.gnu.org/licenses/>.
#
import two_jets as tj
import numpy as np
DIM = tj.DIM = 2
N = tj.N = 4
SIGMA = tj.SIGMA = 1.0
def d2zip(grid):
return np.dstack(grid).reshape([-1,2])
q = SIGMA*2*np.random.randn(N,DIM)
#q = SIGMA*2*np.mgrid[-1.5:1.5:np.complex(0,np.sqrt(N)),-1.5:1.5:np.complex(0,np.sqrt(N))] # particles in regular grid
#q = d2zip(q)
q_1 = np.outer(np.ones(N),np.eye(DIM)).reshape([N,DIM,DIM])
q_2 = np.zeros([N,DIM,DIM,DIM])
p = SIGMA*np.random.randn(N,DIM)
mu_1 = SIGMA*np.random.randn(N,DIM,DIM)
mu_2 = np.zeros([N,DIM,DIM,DIM])
for i in range(0,N):
for d in range(0,DIM):
store = (SIGMA**2)*np.random.randn(DIM,DIM)
mu_2[i,d] = 0.5*(store + store.T)
#q = np.array([[-1.0 , 0.0],[1.0,0.0]])
#p = np.zeros([N,DIM])
#mu_1 = np.zeros([N,DIM,DIM])
#mu_2 = np.zeros([N,DIM,DIM,DIM])
#tj.test_functions(0)
(t_span, y_span) = tj.integrate(tj.weinstein_darboux_to_state(q, q_1, q_2, p, mu_1, mu_2 ), T=1.)
print 'initial energy was \n' + str(tj.energy(y_span[0]))
print 'final energy is \n' + str(tj.energy(y_span[-1]))
# save result
np.save('output/state_data',y_span)
np.save('output/time_data',t_span)
np.save('output/setup',[N,DIM,SIGMA])
| agpl-3.0 | 3,034,996,930,457,353,700 | 31.295082 | 118 | 0.671066 | false |
mkeilman/sirepo | tests/template/srw_import_data/srx.py | 2 | 35248 | # -*- coding: utf-8 -*-
#############################################################################
# SRWLIB Example: Virtual Beamline: a set of utilities and functions allowing to simulate
# operation of an SR Beamline.
# The standard use of this script is from command line, with some optional arguments,
# e.g. for calculation (with default parameter values) of:
# UR Spectrum Through a Slit (Flux within a default aperture):
# python SRWLIB_VirtBL_*.py --sm
# Single-Electron UR Spectrum (Flux per Unit Surface):
# python SRWLIB_VirtBL_*.py --ss
# UR Power Density (at the first optical element):
# python SRWLIB_VirtBL_*.py --pw
# Input Single-Electron UR Intensity Distribution (at the first optical element):
# python SRWLIB_VirtBL_*.py --si
# Single-Electron Wavefront Propagation:
# python SRWLIB_VirtBL_*.py --ws
# Multi-Electron Wavefront Propagation:
# Sequential Mode:
# python SRWLIB_VirtBL_*.py --wm
# Parallel Mode (using MPI / mpi4py), e.g.:
# mpiexec -n 6 python SRWLIB_VirtBL_*.py --wm
# For changing parameters of all these calculaitons from the default valuse, see the definition
# of all options in the list at the end of the script.
# v 0.04
#############################################################################
from __future__ import print_function #Python 2.7 compatibility
from srwl_bl import *
try:
import cPickle as pickle
except:
import pickle
#import time
#*********************************Setting Up Optical Elements and Propagation Parameters
def set_optics(_v):
"""This function describes optical layout of the Coherent Hoard X-ray (CHX) beamline of NSLS-II.
Such function has to be written for every beamline to be simulated; it is specific to a particular beamline.
:param _v: structure containing all parameters allowed to be varied for that particular beamline
"""
#---Nominal Positions of Optical Elements [m] (with respect to straight section center)
zS0 = 33.1798 #White Beam Slits (S0)
zHFM = 34.2608 #Horizontally-Focusing Mirror M1 (HFM)
zS1 = 35.6678 #Pink Beam Slits (S1)
zDCM = 36.4488 #Horizontall-Deflecting Double-Crystal Monochromator (DCM)
zBPM1 = 38.6904 #BPM-1
zBPM2 = 50.3872 #BPM-2
zSSA = 50.6572 #Secondary Source Aperture (SSA)
zEA = 61.9611 #Energy Absorber (EA)
zDBPM1 = 62.272 #Diamond BPM-1
zKBFV = 62.663 #High-Flux Vertically-Focusing KB Mirror M2
zKBFH = 63.0 #High-Flux Horizontally-Focusing KB Mirror M3
zSF = 63.3 #High-Flux Sample position (focus of KBF)
zDBPM2 = 65.9178 #Diamond BPM-2
zKBRV = 66.113 #High-Resolution Vertically-Focusing KB Mirror M4
zKBRH = 66.220 #High-Resolution Horizontally-Focusing KB Mirror M5
zSR = 63.3 #High-Resolution Sample position (focus of KBR)
#zD = 65 #Detector position (?)
#---Instantiation of the Optical Elements
arElNamesAll_01 = ['S0', 'S1', 'S1_DCM', 'DCM', 'DCM_SSA', 'SSA', 'SSA_KBFV', 'KBFV', 'KBFV_KBFH', 'KBFH', 'KBFH_zSF']
arElNamesAll_02 = ['S0', 'S0_HFM', 'HFM', 'HFM_S1', 'S1', 'S1_DCM', 'DCM', 'DCM_SSA', 'SSA', 'SSA_KBRV', 'KBRV', 'KBRV_KBRH', 'KBRH', 'KBRH_zSR']
arElNamesAll_03 = ['S0', 'S0_HFM', 'HFM', 'HFM_S1', 'S1', 'S1_DCM', 'DCM', 'DCM_SSA', 'SSA', 'SSA_DBPM2', 'DBPM2_KBRV', 'KBRV', 'KBRV_KBRH', 'KBRH', 'KBRH_zSR']
arElNamesAll_04 = ['S0', 'S0_HFM', 'HFM', 'HFM_S1', 'S1', 'S1_SSA', 'SSA', 'SSA_DBPM2', 'DBPM2_KBRV', 'KBRV', 'KBRV_KBRH', 'KBRH', 'KBRH_zSR']
arElNamesAll = arElNamesAll_01
if(_v.op_BL == 2): arElNamesAll = arElNamesAll_02
elif(_v.op_BL == 3): arElNamesAll = arElNamesAll_03
elif(_v.op_BL == 4): arElNamesAll = arElNamesAll_04
'''
#Treat beamline sub-cases / alternative configurations
if(len(_v.op_fin) > 0):
if(_v.op_fin not in arElNamesAll): raise Exception('Optical element with the name specified in the "op_fin" option is not present in this beamline')
#Could be made more general
'''
arElNames = [];
for i in range(len(arElNamesAll)):
arElNames.append(arElNamesAll[i])
if(len(_v.op_fin) > 0):
if(arElNamesAll[i] == _v.op_fin): break
el = []; pp = [] #lists of SRW optical element objects and their corresponding propagation parameters
#S0 (primary slit)
if('S0' in arElNames):
el.append(SRWLOptA('r', 'a', _v.op_S0_dx, _v.op_S0_dy)); pp.append(_v.op_S0_pp)
#Drift S0 -> HFM
if('S0_HFM' in arElNames):
el.append(SRWLOptD(zHFM - zS0)); pp.append(_v.op_S0_HFM_pp)
#HDM (Height Profile Error)
if('HFM' in arElNames):
lenHFM = 0.95 #Length [m]
horApHFM = lenHFM*_v.op_HFM_ang #Projected dimensions
verApHFM = 5.e-03 #?
el.append(SRWLOptA('r', 'a', horApHFM, verApHFM)); pp.append(_v.op_HFMA_pp)
if(_v.op_HFM_f != 0.):
el.append(SRWLOptL(_Fx=_v.op_HFM_f)); pp.append(_v.op_HFML_pp)
#To treat Reflectivity (maybe by Planar Mirror?)
#elif(_v.op_HFM_r != 0.):
#Setup Cylindrical Mirror, take into account Reflectivity
#Height Profile Error
ifnHFM = os.path.join(_v.fdir, _v.op_HFM_ifn) if len(_v.op_HFM_ifn) > 0 else ''
if(len(ifnHFM) > 0):
#hProfDataHFM = srwl_uti_read_data_cols(ifnHFM, '\t', 0, 1)
hProfDataHFM = srwl_uti_read_data_cols(ifnHFM, '\t')
opHFM = srwl_opt_setup_surf_height_2d(hProfDataHFM, 'x', _ang=_v.op_HFM_ang, _amp_coef=_v.op_HFM_amp, _nx=1500, _ny=200)
ofnHFM = os.path.join(_v.fdir, _v.op_HFM_ofn) if len(_v.op_HFM_ofn) > 0 else ''
if(len(ofnHFM) > 0):
pathDifHFM = opHFM.get_data(3, 3)
srwl_uti_save_intens_ascii(pathDifHFM, opHFM.mesh, ofnHFM, 0, ['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Dif.'], _arUnits=['', 'm', 'm', 'm'])
el.append(opHFM); pp.append(_v.op_HFMT_pp)
#Drift HFM -> S1
if('HFM_S1' in arElNames):
el.append(SRWLOptD(zS1 - zHFM + _v.op_S1_dz)); pp.append(_v.op_HFM_S1_pp)
#S1 slit
if('S1' in arElNames):
el.append(SRWLOptA('r', 'a', _v.op_S1_dx, _v.op_S1_dy)); pp.append(_v.op_S1_pp)
#Drift S1 -> DCM
if('S1_DCM' in arElNames):
el.append(SRWLOptD(zDCM - zS1 - _v.op_S1_dz)); pp.append(_v.op_S1_DCM_pp)
#Drift S1 -> SSA
if('S1_SSA' in arElNames):
el.append(SRWLOptD(zSSA - zS1 - _v.op_S1_dz + _v.op_SSA_dz)); pp.append(_v.op_S1_SSA_pp)
#Double-Crystal Monochromator
if('DCM' in arElNames):
tc = 1e-02 # [m] crystal thickness
angAs = 0.*pi/180. # [rad] asymmetry angle
hc = [1,1,1]
if(_v.op_DCM_r == '311'): hc = [3,1,1]
dc = srwl_uti_cryst_pl_sp(hc, 'Si')
#print('DCM Interplannar dist.:', dc)
psi = srwl_uti_cryst_pol_f(_v.op_DCM_e0, hc, 'Si') #MR15032016: replaced "op_DCM_e" by "op_DCM_e0" to test the import in Sirepo
#print('DCM Fourier Components:', psi)
#---------------------- DCM Crystal #1
opCr1 = SRWLOptCryst(_d_sp=dc, _psi0r=psi[0], _psi0i=psi[1], _psi_hr=psi[2], _psi_hi=psi[3], _psi_hbr=psi[2], _psi_hbi=psi[3], _tc=tc, _ang_as=angAs, _ang_roll=1.5707963, _e_avg=_v.op_DCM_e0)
#Find appropriate orientation of the Crystal #1 and the Output Beam Frame (using a member-function in SRWLOptCryst):
orientDataCr1 = opCr1.find_orient(_en=_v.op_DCM_e0, _ang_dif_pl=1.5707963) # Horizontally-deflecting #MR15032016: replaced "op_DCM_e" by "op_DCM_e0" to test the import in Sirepo
#Crystal #1 Orientation found:
orientCr1 = orientDataCr1[0]
tCr1 = orientCr1[0] #Tangential Vector to Crystal surface
sCr1 = orientCr1[1]
nCr1 = orientCr1[2] #Normal Vector to Crystal surface
# print('DCM Crystal #1 Orientation (original):')
# print(' t =', tCr1, 's =', orientCr1[1], 'n =', nCr1)
import uti_math
if(_v.op_DCM_ac1 != 0): #Small rotation of DCM Crystal #1:
rot = uti_math.trf_rotation([0,1,0], _v.op_DCM_ac1, [0,0,0])
tCr1 = uti_math.matr_prod(rot[0], tCr1)
sCr1 = uti_math.matr_prod(rot[0], sCr1)
nCr1 = uti_math.matr_prod(rot[0], nCr1)
#Set the Crystal #1 orientation:
opCr1.set_orient(nCr1[0], nCr1[1], nCr1[2], tCr1[0], tCr1[1])
#Orientation of the Outgoing Beam Frame being found:
orientCr1OutFr = orientDataCr1[1]
rxCr1 = orientCr1OutFr[0] #Horizontal Base Vector of the Output Beam Frame
ryCr1 = orientCr1OutFr[1] #Vertical Base Vector of the Output Beam Frame
rzCr1 = orientCr1OutFr[2] #Longitudinal Base Vector of the Output Beam Frame
# print('DCM Crystal #1 Outgoing Beam Frame:')
# print(' ex =', rxCr1, 'ey =', ryCr1, 'ez =', rzCr1)
#Incoming/Outgoing beam frame transformation matrix for the DCM Crystal #1
TCr1 = [rxCr1, ryCr1, rzCr1]
# print('Total transformation matrix after DCM Crystal #1:')
# uti_math.matr_print(TCr1)
#print(' ')
el.append(opCr1); pp.append(_v.op_DCMC1_pp)
#---------------------- DCM Crystal #2
opCr2 = SRWLOptCryst(_d_sp=dc, _psi0r=psi[0], _psi0i=psi[1], _psi_hr=psi[2], _psi_hi=psi[3], _psi_hbr=psi[2], _psi_hbi=psi[3], _tc=tc, _ang_as=angAs, _ang_roll=-1.5707963, _e_avg=_v.op_DCM_e0)
#Find appropriate orientation of the Crystal #2 and the Output Beam Frame
orientDataCr2 = opCr2.find_orient(_en=_v.op_DCM_e0, _ang_dif_pl=-1.5707963) #MR15032016: replaced "op_DCM_e" by "op_DCM_e0" to test the import in Sirepo
#Crystal #2 Orientation found:
orientCr2 = orientDataCr2[0]
tCr2 = orientCr2[0] #Tangential Vector to Crystal surface
sCr2 = orientCr2[1]
nCr2 = orientCr2[2] #Normal Vector to Crystal surface
# print('Crystal #2 Orientation (original):')
# print(' t =', tCr2, 's =', sCr2, 'n =', nCr2)
if(_v.op_DCM_ac2 != 0): #Small rotation of DCM Crystal #2:
rot = uti_math.trf_rotation([0,1,0], _v.op_DCM_ac2, [0,0,0])
tCr2 = uti_math.matr_prod(rot[0], tCr2)
sCr2 = uti_math.matr_prod(rot[0], sCr2)
nCr2 = uti_math.matr_prod(rot[0], nCr2)
#Set the Crystal #2 orientation
opCr2.set_orient(nCr2[0], nCr2[1], nCr2[2], tCr2[0], tCr2[1])
#Orientation of the Outgoing Beam Frame being found:
orientCr2OutFr = orientDataCr2[1]
rxCr2 = orientCr2OutFr[0] #Horizontal Base Vector of the Output Beam Frame
ryCr2 = orientCr2OutFr[1] #Vertical Base Vector of the Output Beam Frame
rzCr2 = orientCr2OutFr[2] #Longitudinal Base Vector of the Output Beam Frame
# print('DCM Crystal #2 Outgoing Beam Frame:')
# print(' ex =', rxCr2, 'ey =', ryCr2, 'ez =',rzCr2)
#Incoming/Outgoing beam transformation matrix for the DCM Crystal #2
TCr2 = [rxCr2, ryCr2, rzCr2]
Ttot = uti_math.matr_prod(TCr2, TCr1)
# print('Total transformation matrix after DCM Crystal #2:')
# uti_math.matr_print(Ttot)
#print(' ')
el.append(opCr2); pp.append(_v.op_DCMC2_pp)
#Drift DCM -> SSA
if('DCM_SSA' in arElNames):
el.append(SRWLOptD(zSSA - zDCM + _v.op_SSA_dz)); pp.append(_v.op_DCM_SSA_pp)
#SSA slit
if('SSA' in arElNames):
el.append(SRWLOptA('r', 'a', _v.op_SSA_dx, _v.op_SSA_dy)); pp.append(_v.op_SSA_pp)
#Drift SSA -> DBPM2
if('SSA_DBPM2' in arElNames):
el.append(SRWLOptD(zDBPM2 - zSSA - _v.op_SSA_dz + _v.op_DBPM2_dz)); pp.append(_v.op_SSA_DBPM2_pp)
###############To continue
## #Sample
## if('SMP' in arElNames):
## ifnSMP = os.path.join(v.fdir, v.op_SMP_ifn) if len(v.op_SMP_ifn) > 0 else ''
## if(len(ifnSMP) > 0):
## ifSMP = open(ifnSMP, 'rb')
## opSMP = pickle.load(ifSMP)
## ofnSMP = os.path.join(v.fdir, v.op_SMP_ofn) if len(v.op_SMP_ofn) > 0 else ''
## if(len(ofnSMP) > 0):
## pathDifSMP = opSMP.get_data(3, 3)
## srwl_uti_save_intens_ascii(pathDifSMP, opSMP.mesh, ofnSMP, 0, ['', 'Horizontal Position', 'Vertical Position', 'Opt. Path Dif.'], _arUnits=['', 'm', 'm', 'm'])
## el.append(opSMP); pp.append(v.op_SMP_pp)
## ifSMP.close()
## #Drift Sample -> Detector
## if('SMP_D' in arElNames):
## el.append(SRWLOptD(zD - zSample + v.op_D_dz)); pp.append(v.op_SMP_D_pp)
pp.append(_v.op_fin_pp)
return SRWLOptC(el, pp)
#*********************************List of Parameters allowed to be varied
#---List of supported options / commands / parameters allowed to be varied for this Beamline (comment-out unnecessary):
varParam = [
#---Data Folder
['fdir', 's', os.path.join(os.getcwd(), 'data_SRX'), 'folder (directory) name for reading-in input and saving output data files'],
#---Electron Beam
['ebm_nm', 's', 'NSLS-II Low Beta ', 'standard electron beam name'],
['ebm_nms', 's', 'Day1', 'standard electron beam name suffix: e.g. can be Day1, Final'],
['ebm_i', 'f', 0.5, 'electron beam current [A]'],
#['ebeam_e', 'f', 3., 'electron beam avarage energy [GeV]'],
['ebm_de', 'f', 0., 'electron beam average energy deviation [GeV]'],
['ebm_x', 'f', 0., 'electron beam initial average horizontal position [m]'],
['ebm_y', 'f', 0., 'electron beam initial average vertical position [m]'],
['ebm_xp', 'f', 0., 'electron beam initial average horizontal angle [rad]'],
['ebm_yp', 'f', 0., 'electron beam initial average vertical angle [rad]'],
['ebm_z', 'f', 0., 'electron beam initial average longitudinal position [m]'],
['ebm_dr', 'f', 0., 'electron beam longitudinal drift [m] to be performed before a required calculation'],
['ebm_ens', 'f', -1, 'electron beam relative energy spread'],
['ebm_emx', 'f', -1, 'electron beam horizontal emittance [m]'],
['ebm_emy', 'f', -1, 'electron beam vertical emittance [m]'],
#---Undulator
['und_per', 'f', 0.021, 'undulator period [m]'],
['und_len', 'f', 1.5, 'undulator length [m]'],
['und_b', 'f', 0.88770981, 'undulator vertical peak magnetic field [T]'],
#['und_bx', 'f', 0., 'undulator horizontal peak magnetic field [T]'],
#['und_by', 'f', 1., 'undulator vertical peak magnetic field [T]'],
#['und_phx', 'f', 1.5708, 'undulator horizontal magnetic field phase [rad]'],
#['und_phy', 'f', 0., 'undulator vertical magnetic field phase [rad]'],
['und_sx', 'i', 1.0, 'undulator horizontal magnetic field symmetry vs longitudinal position'],
#['und_sy', 'i', -1, 'undulator vertical magnetic field symmetry vs longitudinal position'],
['und_zc', 'f', 1.305, 'undulator center longitudinal position [m]'],
['und_mdir', 's', 'magn_meas', 'name of magnetic measurements sub-folder'],
['und_mfs', 's', 'ivu21_srx_sum.txt', 'name of magnetic measurements for different gaps summary file'],
#['und_g', 'f', 0., 'undulator gap [mm] (assumes availability of magnetic measurement or simulation data)'],
#---Calculation Types
#Electron Trajectory
['tr', '', '', 'calculate electron trajectory', 'store_true'],
['tr_cti', 'f', 0., 'initial time moment (c*t) for electron trajectory calculation [m]'],
['tr_ctf', 'f', 0., 'final time moment (c*t) for electron trajectory calculation [m]'],
['tr_np', 'f', 50000, 'number of points for trajectory calculation'],
['tr_mag', 'i', 1, 'magnetic field to be used for trajectory calculation: 1- approximate, 2- accurate'],
['tr_fn', 's', 'res_trj.dat', 'file name for saving calculated trajectory data'],
    ['tr_pl', 's', 'xxpyypz', 'plot the resulting trajectory in graph(s): ""- dont plot, otherwise the string should list the trajectory components to plot'],
#Single-Electron Spectrum vs Photon Energy
['ss', '', '', 'calculate single-e spectrum vs photon energy', 'store_true'],
['ss_ei', 'f', 100., 'initial photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ef', 'f', 20000., 'final photon energy [eV] for single-e spectrum vs photon energy calculation'],
['ss_ne', 'i', 10000, 'number of points vs photon energy for single-e spectrum vs photon energy calculation'],
['ss_x', 'f', 0., 'horizontal position [m] for single-e spectrum vs photon energy calculation'],
['ss_y', 'f', 0., 'vertical position [m] for single-e spectrum vs photon energy calculation'],
['ss_meth', 'i', 1, 'method to use for single-e spectrum vs photon energy calculation: 0- "manual", 1- "auto-undulator", 2- "auto-wiggler"'],
['ss_prec', 'f', 0.01, 'relative precision for single-e spectrum vs photon energy calculation (nominal value is 0.01)'],
['ss_pol', 'i', 6, 'polarization component to extract after spectrum vs photon energy calculation: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['ss_mag', 'i', 1, 'magnetic field to be used for single-e spectrum vs photon energy calculation: 1- approximate, 2- accurate'],
['ss_fn', 's', 'res_spec_se.dat', 'file name for saving calculated single-e spectrum vs photon energy'],
['ss_pl', 's', 'e', 'plot the resulting single-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#Multi-Electron Spectrum vs Photon Energy (taking into account e-beam emittance, energy spread and collection aperture size)
['sm', '', '', 'calculate multi-e spectrum vs photon energy', 'store_true'],
['sm_ei', 'f', 100., 'initial photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ef', 'f', 20000., 'final photon energy [eV] for multi-e spectrum vs photon energy calculation'],
['sm_ne', 'i', 10000, 'number of points vs photon energy for multi-e spectrum vs photon energy calculation'],
['sm_x', 'f', 0., 'horizontal center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_rx', 'f', 0.001, 'range of horizontal position / horizontal aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_nx', 'i', 1, 'number of points vs horizontal position for multi-e spectrum vs photon energy calculation'],
['sm_y', 'f', 0., 'vertical center position [m] for multi-e spectrum vs photon energy calculation'],
['sm_ry', 'f', 0.001, 'range of vertical position / vertical aperture size [m] for multi-e spectrum vs photon energy calculation'],
['sm_ny', 'i', 1, 'number of points vs vertical position for multi-e spectrum vs photon energy calculation'],
['sm_mag', 'i', 1, 'magnetic field to be used for calculation of multi-e spectrum spectrum or intensity distribution: 1- approximate, 2- accurate'],
    ['sm_hi', 'i', 1, 'initial UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
    ['sm_hf', 'i', 15, 'final UR spectral harmonic to be taken into account for multi-e spectrum vs photon energy calculation'],
['sm_prl', 'f', 1., 'longitudinal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_pra', 'f', 1., 'azimuthal integration precision parameter for multi-e spectrum vs photon energy calculation'],
['sm_type', 'i', 1, 'calculate flux (=1) or flux per unit surface (=2)'],
['sm_pol', 'i', 6, 'polarization component to extract after calculation of multi-e flux or intensity: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['sm_fn', 's', 'res_spec_me.dat', 'file name for saving calculated milti-e spectrum vs photon energy'],
['sm_pl', 's', 'e', 'plot the resulting spectrum-e spectrum in a graph: ""- dont plot, "e"- show plot vs photon energy'],
#to add options for the multi-e calculation from "accurate" magnetic field
#Power Density Distribution vs horizontal and vertical position
['pw', '', '', 'calculate SR power density distribution', 'store_true'],
['pw_x', 'f', 0., 'central horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_rx', 'f', 0.015, 'range of horizontal position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_nx', 'i', 100, 'number of points vs horizontal position for calculation of power density distribution'],
['pw_y', 'f', 0., 'central vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ry', 'f', 0.015, 'range of vertical position [m] for calculation of power density distribution vs horizontal and vertical position'],
['pw_ny', 'i', 100, 'number of points vs vertical position for calculation of power density distribution'],
['pw_pr', 'f', 1., 'precision factor for calculation of power density distribution'],
['pw_meth', 'i', 1, 'power density computation method (1- "near field", 2- "far field")'],
['pw_zi', 'f', 0., 'initial longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
['pw_zf', 'f', 0., 'final longitudinal position along electron trajectory of power density distribution (effective if pow_sst < pow_sfi)'],
['pw_mag', 'i', 1, 'magnetic field to be used for power density calculation: 1- approximate, 2- accurate'],
['pw_fn', 's', 'res_pow.dat', 'file name for saving calculated power density distribution'],
['pw_pl', 's', 'xy', 'plot the resulting power density distribution in a graph: ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
#Single-Electron Intensity distribution vs horizontal and vertical position
['si', '', '', 'calculate single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position', 'store_true'],
#Single-Electron Wavefront Propagation
['ws', '', '', 'calculate single-electron (/ fully coherent) wavefront propagation', 'store_true'],
#Multi-Electron (partially-coherent) Wavefront Propagation
['wm', '', '', 'calculate multi-electron (/ partially coherent) wavefront propagation', 'store_true'],
['w_e', 'f', 9000., 'photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ef', 'f', -1., 'final photon energy [eV] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ne', 'i', 1, 'number of points vs photon energy for calculation of intensity distribution'],
['w_x', 'f', 0., 'central horizontal position [m] for calculation of intensity distribution'],
['w_rx', 'f', 2.4e-03, 'range of horizontal position [m] for calculation of intensity distribution'],
['w_nx', 'i', 100, 'number of points vs horizontal position for calculation of intensity distribution'],
['w_y', 'f', 0., 'central vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ry', 'f', 2.0e-03, 'range of vertical position [m] for calculation of intensity distribution vs horizontal and vertical position'],
['w_ny', 'i', 100, 'number of points vs vertical position for calculation of intensity distribution'],
['w_smpf', 'f', 1., 'sampling factor for calculation of intensity distribution vs horizontal and vertical position'],
['w_meth', 'i', 1, 'method to use for calculation of intensity distribution vs horizontal and vertical position'],
['w_prec', 'f', 0.01, 'relative precision for calculation of intensity distribution vs horizontal and vertical position'],
['w_mag', 'i', 1, 'magnetic field to be used for calculation of intensity distribution vs horizontal and vertical position: 1- approximate, 2- accurate'],
['si_pol', 'i', 6, 'polarization component to extract after calculation of intensity distribution: 0- Linear Horizontal, 1- Linear Vertical, 2- Linear 45 degrees, 3- Linear 135 degrees, 4- Circular Right, 5- Circular Left, 6- Total'],
['si_type', 'i', 0, 'type of a characteristic to be extracted after calculation of intensity distribution: 0- Single-Electron Intensity, 1- Multi-Electron Intensity, 2- Single-Electron Flux, 3- Multi-Electron Flux, 4- Single-Electron Radiation Phase, 5- Re(E): Real part of Single-Electron Electric Field, 6- Im(E): Imaginary part of Single-Electron Electric Field, 7- Single-Electron Intensity, integrated over Time or Photon Energy'],
['si_fn', 's', 'res_int_se.dat', 'file name for saving calculated single-e intensity distribution (without wavefront propagation through a beamline) vs horizontal and vertical position'],
['ws_fni', 's', 'res_int_pr_se.dat', 'file name for saving propagated single-e intensity distribution vs horizontal and vertical position'],
    ['ws_pl', 's', 'xy', 'plot the propagated radiation intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['ws_ap', 'i', 0, 'switch specifying representation of the resulting Stokes parameters (/ Intensity distribution): coordinate (0) or angular (1)'],
['si_pl', 's', 'xy', 'plot the input intensity distributions in graph(s): ""- dont plot, "x"- vs horizontal position, "y"- vs vertical position, "xy"- vs horizontal and vertical position'],
['wm_nm', 'i', 100000, 'number of macro-electrons (coherent wavefronts) for calculation of multi-electron wavefront propagation'],
['wm_na', 'i', 5, 'number of macro-electrons (coherent wavefronts) to average on each node at parallel (MPI-based) calculation of multi-electron wavefront propagation'],
['wm_ns', 'i', 5, 'saving periodicity (in terms of macro-electrons / coherent wavefronts) for intermediate intensity at multi-electron wavefront propagation calculation'],
['wm_ch', 'i', 0, 'type of a characteristic to be extracted after calculation of multi-electron wavefront propagation: #0- intensity (s0); 1- four Stokes components; 2- mutual intensity cut vs x; 3- mutual intensity cut vs y'],
['wm_ap', 'i', 0, 'switch specifying representation of the resulting Stokes parameters: coordinate (0) or angular (1)'],
['wm_x0', 'f', 0, 'horizontal center position for mutual intensity cut calculation'],
['wm_y0', 'f', 0, 'vertical center position for mutual intensity cut calculation'],
['wm_ei', 'i', 0, 'integration over photon energy is required (1) or not (0); if the integration is required, the limits are taken from w_e, w_ef'],
['wm_rm', 'i', 1, 'method for generation of pseudo-random numbers for e-beam phase-space integration: 1- standard pseudo-random number generator, 2- Halton sequences, 3- LPtau sequences (to be implemented)'],
['wm_fni', 's', 'res_int_pr_me.dat', 'file name for saving propagated multi-e intensity distribution vs horizontal and vertical position'],
#['ws_fn', 's', '', 'file name for saving single-e (/ fully coherent) wavefront data'],
#['wm_fn', 's', '', 'file name for saving multi-e (/ partially coherent) wavefront data'],
#to add options
['op_r', 'f', 33.1798, 'longitudinal position of the first optical element [m]'],
['op_fin', 's', 'S3_SMP', 'name of the final optical element wavefront has to be propagated through'],
#NOTE: the above option/variable names (fdir, ebm*, und*, ss*, sm*, pw*, is*, ws*, wm*) should be the same in all beamline scripts
#on the other hand, the beamline optics related options below (op*) are specific to a particular beamline (and can be differ from beamline to beamline).
#However, the default values of all the options/variables (above and below) can differ from beamline to beamline.
#---Beamline Optics
['op_BL', 'f', 1, 'beamline version/option number'],
['op_S0_dx', 'f', 2.375e-03, 'slit S0: horizontal size [m]'],
['op_S0_dy', 'f', 2.0e-03, 'slit S0: vertical size [m]'],
['op_HFM_f', 'f', 11.0893, 'mirror HFM: focal length [m] (effective if op_HFM_f != 0)'],
['op_HFM_r', 'f', 8.924e+03, 'mirror HFM: radius of curvature [m] (effective if op_HFM_r != 0 and op_HFM_f == 0)'],
['op_HFM_ang', 'f', 2.5e-03, 'mirror HFM: angle of incidence [rad]'],
['op_HFM_mat', 's', '', 'mirror HFM: coating material; possible options: Si, Cr, Rh, Pt'],
['op_HFM_ifn', 's', 'mir_metro/SRX_HFM_height_prof.dat', 'mirror HFM: input file name of height profile data'],
#['op_HFM_ifn', 's', '', 'mirror HFM: input file name of height profile data'],
['op_HFM_amp', 'f', 1., 'mirror HFM: amplification coefficient for height profile data'],
['op_HFM_ofn', 's', 'res_SRX_HFM_opt_path_dif.dat', 'mirror HCM: output file name of optical path difference data'],
['op_S1_dz', 'f', 0., 'S1: offset of longitudinal position [m]'],
['op_S1_dx', 'f', 2.375e-03, 'slit S1: horizontal size [m]'],
['op_S1_dy', 'f', 10.0e-03, 'slit S1: vertical size [m]'],
['op_DCM_e0', 'f', 8999., 'DCM: central photon energy DCM is tuned to [eV]'], #MR15032016: replaced "op_DCM_e" by "op_DCM_e0" to test the import in Sirepo
['op_DCM_r', 's', '111', 'DCM: reflection type (can be either "111" or "311")'],
['op_DCM_ac1', 'f', 0., 'DCM: angular deviation of 1st crystal from exact Bragg angle [rad]'],
['op_DCM_ac2', 'f', 0., 'DCM: angular deviation of 2nd crystal from exact Bragg angle [rad]'],
['op_SSA_dz', 'f', 0., 'slit SSA: offset of longitudinal position [m]'],
['op_SSA_dx', 'f', 3.0e-03, 'slit SSA: horizontal size [m]'],
['op_SSA_dy', 'f', 3.0e-03, 'slit SSA: vertical size [m]'],
['op_DBPM2_dz', 'f', 0., 'slit DBPM2: offset of longitudinal position [m]'],
###############To continue
## ['op_SMP_dz', 'f', 0., 'sample: offset of longitudinal position [m]'],
## ['op_SMP_ifn', 's', 'CHX_SMP_CDI_001.pickle', 'sample: model file name (binary "dumped" SRW transmission object)'],
## ['op_SMP_ofn', 's', 'res_CHX_SMP_opt_path_dif.dat', 'sample: output file name of optical path difference data'],
## ['op_D_dz', 'f', 0., 'detector: offset of longitudinal position [m]'],
#to add options for different beamline cases, etc.
#Propagation Param.: [0][1][2][3][4] [5] [6] [7] [8] [9][10][11]
#['op_S0_pp', 'f', [0, 0, 1, 0, 0, 4.5, 5.0, 1.5, 2.5, 0, 0, 0], 'slit S0: propagation parameters'],
#['op_S0_pp', 'f', [0, 0, 1, 0, 0, 2.2, 6.0, 3.0, 15.0, 0, 0, 0], 'slit S0: propagation parameters'],
#['op_S0_pp', 'f', [0, 0, 1, 0, 0, 2.0, 15.0,1.5, 15.0,0, 0, 0], 'slit S0: propagation parameters'],
['op_S0_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'slit S0: propagation parameters'],
['op_S0_HFM_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S0 -> HFM: propagation parameters'],
['op_HFMA_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'mirror HCM: Aperture propagation parameters'],
['op_HFML_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'mirror HCM: Lens propagation parameters'],
['op_HFMT_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'mirror HCM: Transmission propagation parameters'],
['op_HFM_S1_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift HDM -> S1: propagation parameters'],
['op_S1_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'slit S1: propagation parameters'],
['op_S1_SSA_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S1 -> SSA: propagation parameters'],
['op_S1_DCM_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S1 -> DCM: propagation parameters'],
['op_DCMC1_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'DCM C1: propagation parameters'],
['op_DCMC2_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'DCM C2: propagation parameters'],
['op_DCM_SSA_pp', 'f', [0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift DCM -> SSA: propagation parameters'],
['op_SSA_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'slit SSA: propagation parameters'],
['op_SSA_DBPM2_pp', 'f',[0, 0, 1, 1, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift SSA -> DBPM2: propagation parameters'],
###############To continue
## ['op_S3_SMP_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'drift S3 -> sample: propagation parameters'],
## ['op_SMP_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'sample: propagation parameters'],
## ['op_SMP_D_pp', 'f', [0, 0, 1, 3, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'sample -> detector: propagation parameters'],
#['op_fin_pp', 'f', [0, 0, 1, 0, 1, 0.1, 5.0, 1.0, 1.5, 0, 0, 0], 'final post-propagation (resize) parameters'],
['op_fin_pp', 'f', [0, 0, 1, 0, 0, 1.0, 1.0, 1.0, 1.0, 0, 0, 0], 'final post-propagation (resize) parameters'],
#[ 0]: Auto-Resize (1) or not (0) Before propagation
#[ 1]: Auto-Resize (1) or not (0) After propagation
#[ 2]: Relative Precision for propagation with Auto-Resizing (1. is nominal)
#[ 3]: Allow (1) or not (0) for semi-analytical treatment of the quadratic (leading) phase terms at the propagation
#[ 4]: Do any Resizing on Fourier side, using FFT, (1) or not (0)
#[ 5]: Horizontal Range modification factor at Resizing (1. means no modification)
#[ 6]: Horizontal Resolution modification factor at Resizing
#[ 7]: Vertical Range modification factor at Resizing
#[ 8]: Vertical Resolution modification factor at Resizing
#[ 9]: Type of wavefront Shift before Resizing (not yet implemented)
#[10]: New Horizontal wavefront Center position after Shift (not yet implemented)
#[11]: New Vertical wavefront Center position after Shift (not yet implemented)
#[12]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Horizontal Coordinate
#[13]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Vertical Coordinate
#[14]: Optional: Orientation of the Output Optical Axis vector in the Incident Beam Frame: Longitudinal Coordinate
#[15]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Horizontal Coordinate
#[16]: Optional: Orientation of the Horizontal Base vector of the Output Frame in the Incident Beam Frame: Vertical Coordinate
]
varParam = srwl_uti_ext_options(varParam) #Adding other default options
#*********************************Entry
if __name__ == "__main__":
#---Parse options, defining Beamline elements and running calculations
v = srwl_uti_parse_options(varParam)
#---Add some constant "parameters" (not allowed to be varied) for the beamline
v.und_per = 0.021 #['und_per', 'f', 0.021, 'undulator period [m]'],
v.und_len = 1.5 #['und_len', 'f', 1.5, 'undulator length [m]'],
v.und_zc = 1.305 #['und_zc', 'f', 1.305, 'undulator center longitudinal position [m]'],
v.und_sy = -1 #['und_sy', 'i', -1, 'undulator horizontal magnetic field symmetry vs longitudinal position'],
v.und_sx = 1 #['und_sx', 'i', 1, 'undulator vertical magnetic field symmetry vs longitudinal position'],
#---Setup optics only if Wavefront Propagation is required:
v.ws = True
op = set_optics(v) if(v.ws or v.wm) else None
#---Run all requested calculations
SRWLBeamline('SRX beamline').calc_all(v, op)
| apache-2.0 | -2,868,898,278,633,323,500 | 66.915222 | 440 | 0.636064 | false |
luiseduardohdbackup/odoo | addons/auth_signup/res_config.py | 445 | 2860 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.safe_eval import safe_eval
class base_config_settings(osv.TransientModel):
_inherit = 'base.config.settings'
_columns = {
'auth_signup_reset_password': fields.boolean('Enable password reset from Login page',
help="This allows users to trigger a password reset from the Login page."),
'auth_signup_uninvited': fields.boolean('Allow external users to sign up',
help="If unchecked, only invited users may sign up."),
'auth_signup_template_user_id': fields.many2one('res.users',
string='Template user for new users created through signup'),
}
def get_default_auth_signup_template_user_id(self, cr, uid, fields, context=None):
icp = self.pool.get('ir.config_parameter')
# we use safe_eval on the result, since the value of the parameter is a nonempty string
return {
'auth_signup_reset_password': safe_eval(icp.get_param(cr, uid, 'auth_signup.reset_password', 'False')),
'auth_signup_uninvited': safe_eval(icp.get_param(cr, uid, 'auth_signup.allow_uninvited', 'False')),
'auth_signup_template_user_id': safe_eval(icp.get_param(cr, uid, 'auth_signup.template_user_id', 'False')),
}
def set_auth_signup_template_user_id(self, cr, uid, ids, context=None):
config = self.browse(cr, uid, ids[0], context=context)
icp = self.pool.get('ir.config_parameter')
# we store the repr of the values, since the value of the parameter is a required string
icp.set_param(cr, uid, 'auth_signup.reset_password', repr(config.auth_signup_reset_password))
icp.set_param(cr, uid, 'auth_signup.allow_uninvited', repr(config.auth_signup_uninvited))
icp.set_param(cr, uid, 'auth_signup.template_user_id', repr(config.auth_signup_template_user_id.id))
| agpl-3.0 | -8,442,038,951,579,543,000 | 54 | 119 | 0.647902 | false |
imjonsnooow/synapse | synapse/tests/test_async.py | 1 | 3507 | import time
import unittest
import threading
import synapse.async as s_async
import synapse.lib.threads as s_threads
from synapse.tests.common import *
class AsyncTests(SynTest):
def test_async_basics(self):
boss = s_async.Boss()
data = {}
def jobmeth(x, y=20):
return x + y
def jobdork(x, y=20):
raise Exception('hi')
def jobdone(job):
name = job[1].get('name')
data[name] = job
jid1 = s_async.jobid()
jid2 = s_async.jobid()
task1 = (jobmeth, (3,), {})
task2 = (jobdork, (3,), {})
job1 = boss.initJob(jid1, task=task1, name='job1', ondone=jobdone)
job2 = boss.initJob(jid2, task=task2, name='job2', ondone=jobdone)
self.assertEqual( job1[0], jid1 )
self.assertEqual( len(boss.jobs()), 2 )
boss._runJob(job1)
self.assertEqual( len(boss.jobs()), 1 )
boss._runJob(job2)
self.assertEqual( len(boss.jobs()), 0 )
ret1 = data.get('job1')
self.assertIsNotNone(ret1)
self.assertEqual( ret1[1]['ret'], 23 )
ret2 = data.get('job2')
self.assertIsNotNone(ret2)
self.assertEqual( ret2[1]['err'], 'Exception' )
boss.fini()
def test_async_pool_basics(self):
boss = s_async.Boss()
boss.runBossPool(3)
data = {}
def jobmeth(x, y=20):
return x + y
def jobdork(x, y=20):
raise Exception('hi')
def jobdone(job):
name = job[1].get('name')
data[name] = job
jid1 = s_async.jobid()
jid2 = s_async.jobid()
task1 = (jobmeth, (3,), {})
task2 = (jobdork, (3,), {})
job1 = boss.initJob(jid1, task=task1, name='job1', ondone=jobdone)
job2 = boss.initJob(jid2, task=task2, name='job2', ondone=jobdone)
self.assertEqual( job1[0], jid1 )
boss.wait(jid1, timeout=1)
boss.wait(jid2, timeout=1)
ret1 = data.get('job1')
self.assertIsNotNone(ret1)
self.assertEqual( ret1[1]['ret'], 23 )
ret2 = data.get('job2')
self.assertIsNotNone(ret2)
self.assertEqual( ret2[1]['err'], 'Exception' )
boss.fini()
def test_async_timeout(self):
boss = s_async.Boss()
def myjob():
time.sleep(0.2)
jid = s_async.jobid()
job = boss.initJob(jid, task=(myjob,(),{}), timeout=0.01)
boss.wait(jid)
self.assertEqual( job[1]['err'], 'HitMaxTime' )
boss.fini()
def test_async_ondone(self):
boss = s_async.Boss()
boss.runBossPool(3)
data = {}
evt = threading.Event()
def ondone(job):
data['job'] = job
evt.set()
def woot():
return 10
jid = s_async.jobid()
task = s_async.newtask(woot)
boss.initJob(jid, task=task, ondone=ondone)
self.assertTrue( evt.wait(timeout=1) )
job = data.get('job')
self.assertEqual( job[1].get('ret'), 10 )
boss.fini()
def test_async_wait_timeout(self):
def longtime():
time.sleep(0.1)
boss = s_async.Boss()
boss.runBossPool(1)
jid = s_async.jobid()
task = s_async.newtask(longtime)
boss.initJob(jid, task=task)
self.assertFalse( boss.wait(jid,timeout=0.01) )
self.assertTrue( boss.wait(jid,timeout=1) )
boss.fini()
| apache-2.0 | 2,413,508,944,930,172,000 | 21.480769 | 74 | 0.528372 | false |
rob356/SickRage | lib/unidecode/x06e.py | 252 | 4640 | data = (
'Ben ', # 0x00
'Yuan ', # 0x01
'Wen ', # 0x02
'Re ', # 0x03
'Fei ', # 0x04
'Qing ', # 0x05
'Yuan ', # 0x06
'Ke ', # 0x07
'Ji ', # 0x08
'She ', # 0x09
'Yuan ', # 0x0a
'Shibui ', # 0x0b
'Lu ', # 0x0c
'Zi ', # 0x0d
'Du ', # 0x0e
'[?] ', # 0x0f
'Jian ', # 0x10
'Min ', # 0x11
'Pi ', # 0x12
'Tani ', # 0x13
'Yu ', # 0x14
'Yuan ', # 0x15
'Shen ', # 0x16
'Shen ', # 0x17
'Rou ', # 0x18
'Huan ', # 0x19
'Zhu ', # 0x1a
'Jian ', # 0x1b
'Nuan ', # 0x1c
'Yu ', # 0x1d
'Qiu ', # 0x1e
'Ting ', # 0x1f
'Qu ', # 0x20
'Du ', # 0x21
'Feng ', # 0x22
'Zha ', # 0x23
'Bo ', # 0x24
'Wo ', # 0x25
'Wo ', # 0x26
'Di ', # 0x27
'Wei ', # 0x28
'Wen ', # 0x29
'Ru ', # 0x2a
'Xie ', # 0x2b
'Ce ', # 0x2c
'Wei ', # 0x2d
'Ge ', # 0x2e
'Gang ', # 0x2f
'Yan ', # 0x30
'Hong ', # 0x31
'Xuan ', # 0x32
'Mi ', # 0x33
'Ke ', # 0x34
'Mao ', # 0x35
'Ying ', # 0x36
'Yan ', # 0x37
'You ', # 0x38
'Hong ', # 0x39
'Miao ', # 0x3a
'Xing ', # 0x3b
'Mei ', # 0x3c
'Zai ', # 0x3d
'Hun ', # 0x3e
'Nai ', # 0x3f
'Kui ', # 0x40
'Shi ', # 0x41
'E ', # 0x42
'Pai ', # 0x43
'Mei ', # 0x44
'Lian ', # 0x45
'Qi ', # 0x46
'Qi ', # 0x47
'Mei ', # 0x48
'Tian ', # 0x49
'Cou ', # 0x4a
'Wei ', # 0x4b
'Can ', # 0x4c
'Tuan ', # 0x4d
'Mian ', # 0x4e
'Hui ', # 0x4f
'Mo ', # 0x50
'Xu ', # 0x51
'Ji ', # 0x52
'Pen ', # 0x53
'Jian ', # 0x54
'Jian ', # 0x55
'Hu ', # 0x56
'Feng ', # 0x57
'Xiang ', # 0x58
'Yi ', # 0x59
'Yin ', # 0x5a
'Zhan ', # 0x5b
'Shi ', # 0x5c
'Jie ', # 0x5d
'Cheng ', # 0x5e
'Huang ', # 0x5f
'Tan ', # 0x60
'Yu ', # 0x61
'Bi ', # 0x62
'Min ', # 0x63
'Shi ', # 0x64
'Tu ', # 0x65
'Sheng ', # 0x66
'Yong ', # 0x67
'Qu ', # 0x68
'Zhong ', # 0x69
'Suei ', # 0x6a
'Jiu ', # 0x6b
'Jiao ', # 0x6c
'Qiou ', # 0x6d
'Yin ', # 0x6e
'Tang ', # 0x6f
'Long ', # 0x70
'Huo ', # 0x71
'Yuan ', # 0x72
'Nan ', # 0x73
'Ban ', # 0x74
'You ', # 0x75
'Quan ', # 0x76
'Chui ', # 0x77
'Liang ', # 0x78
'Chan ', # 0x79
'Yan ', # 0x7a
'Chun ', # 0x7b
'Nie ', # 0x7c
'Zi ', # 0x7d
'Wan ', # 0x7e
'Shi ', # 0x7f
'Man ', # 0x80
'Ying ', # 0x81
'Ratsu ', # 0x82
'Kui ', # 0x83
'[?] ', # 0x84
'Jian ', # 0x85
'Xu ', # 0x86
'Lu ', # 0x87
'Gui ', # 0x88
'Gai ', # 0x89
'[?] ', # 0x8a
'[?] ', # 0x8b
'Po ', # 0x8c
'Jin ', # 0x8d
'Gui ', # 0x8e
'Tang ', # 0x8f
'Yuan ', # 0x90
'Suo ', # 0x91
'Yuan ', # 0x92
'Lian ', # 0x93
'Yao ', # 0x94
'Meng ', # 0x95
'Zhun ', # 0x96
'Sheng ', # 0x97
'Ke ', # 0x98
'Tai ', # 0x99
'Da ', # 0x9a
'Wa ', # 0x9b
'Liu ', # 0x9c
'Gou ', # 0x9d
'Sao ', # 0x9e
'Ming ', # 0x9f
'Zha ', # 0xa0
'Shi ', # 0xa1
'Yi ', # 0xa2
'Lun ', # 0xa3
'Ma ', # 0xa4
'Pu ', # 0xa5
'Wei ', # 0xa6
'Li ', # 0xa7
'Cai ', # 0xa8
'Wu ', # 0xa9
'Xi ', # 0xaa
'Wen ', # 0xab
'Qiang ', # 0xac
'Ze ', # 0xad
'Shi ', # 0xae
'Su ', # 0xaf
'Yi ', # 0xb0
'Zhen ', # 0xb1
'Sou ', # 0xb2
'Yun ', # 0xb3
'Xiu ', # 0xb4
'Yin ', # 0xb5
'Rong ', # 0xb6
'Hun ', # 0xb7
'Su ', # 0xb8
'Su ', # 0xb9
'Ni ', # 0xba
'Ta ', # 0xbb
'Shi ', # 0xbc
'Ru ', # 0xbd
'Wei ', # 0xbe
'Pan ', # 0xbf
'Chu ', # 0xc0
'Chu ', # 0xc1
'Pang ', # 0xc2
'Weng ', # 0xc3
'Cang ', # 0xc4
'Mie ', # 0xc5
'He ', # 0xc6
'Dian ', # 0xc7
'Hao ', # 0xc8
'Huang ', # 0xc9
'Xi ', # 0xca
'Zi ', # 0xcb
'Di ', # 0xcc
'Zhi ', # 0xcd
'Ying ', # 0xce
'Fu ', # 0xcf
'Jie ', # 0xd0
'Hua ', # 0xd1
'Ge ', # 0xd2
'Zi ', # 0xd3
'Tao ', # 0xd4
'Teng ', # 0xd5
'Sui ', # 0xd6
'Bi ', # 0xd7
'Jiao ', # 0xd8
'Hui ', # 0xd9
'Gun ', # 0xda
'Yin ', # 0xdb
'Gao ', # 0xdc
'Long ', # 0xdd
'Zhi ', # 0xde
'Yan ', # 0xdf
'She ', # 0xe0
'Man ', # 0xe1
'Ying ', # 0xe2
'Chun ', # 0xe3
'Lu ', # 0xe4
'Lan ', # 0xe5
'Luan ', # 0xe6
'[?] ', # 0xe7
'Bin ', # 0xe8
'Tan ', # 0xe9
'Yu ', # 0xea
'Sou ', # 0xeb
'Hu ', # 0xec
'Bi ', # 0xed
'Biao ', # 0xee
'Zhi ', # 0xef
'Jiang ', # 0xf0
'Kou ', # 0xf1
'Shen ', # 0xf2
'Shang ', # 0xf3
'Di ', # 0xf4
'Mi ', # 0xf5
'Ao ', # 0xf6
'Lu ', # 0xf7
'Hu ', # 0xf8
'Hu ', # 0xf9
'You ', # 0xfa
'Chan ', # 0xfb
'Fan ', # 0xfc
'Yong ', # 0xfd
'Gun ', # 0xfe
'Man ', # 0xff
)
| gpl-3.0 | -6,910,024,332,784,715,000 | 16.984496 | 20 | 0.388362 | false |
akshayaurora/kivy | kivy/uix/colorpicker.py | 3 | 16388 | '''
Color Picker
============
.. versionadded:: 1.7.0
.. warning::
This widget is experimental. Its use and API can change at any time until
this warning is removed.
.. image:: images/colorpicker.png
:align: right
The ColorPicker widget allows a user to select a color from a chromatic
wheel where pinch and zoom can be used to change the wheel's saturation.
Sliders and TextInputs are also provided for entering the RGBA/HSV/HEX values
directly.
Usage::
clr_picker = ColorPicker()
parent.add_widget(clr_picker)
# To monitor changes, we can bind to color property changes
def on_color(instance, value):
print "RGBA = ", str(value) # or instance.color
print "HSV = ", str(instance.hsv)
print "HEX = ", str(instance.hex_color)
clr_picker.bind(color=on_color)
'''
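# Illustrative note (not from the original kivy docs): besides binding to ``color``,
# the selection can also be driven programmatically through the alias properties
# defined below, e.g.
#   clr_picker.hex_color = '#ff00ffff'  # also updates ``color`` and ``hsv``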
__all__ = ('ColorPicker', 'ColorWheel')
from kivy.uix.relativelayout import RelativeLayout
from kivy.uix.widget import Widget
from kivy.properties import (NumericProperty, BoundedNumericProperty,
ListProperty, ObjectProperty,
ReferenceListProperty, StringProperty,
AliasProperty)
from kivy.clock import Clock
from kivy.graphics import Mesh, InstructionGroup, Color
from kivy.utils import get_color_from_hex, get_hex_from_color
from kivy.logger import Logger
from math import cos, sin, pi, sqrt, atan
from colorsys import rgb_to_hsv, hsv_to_rgb
def distance(pt1, pt2):
return sqrt((pt1[0] - pt2[0]) ** 2. + (pt1[1] - pt2[1]) ** 2.)
def polar_to_rect(origin, r, theta):
return origin[0] + r * cos(theta), origin[1] + r * sin(theta)
def rect_to_polar(origin, x, y):
if x == origin[0]:
if y == origin[1]:
return (0, 0)
elif y > origin[1]:
return (y - origin[1], pi / 2.)
else:
return (origin[1] - y, 3 * pi / 2.)
t = atan(float((y - origin[1])) / (x - origin[0]))
if x - origin[0] < 0:
t += pi
if t < 0:
t += 2 * pi
return (distance((x, y), origin), t)
class ColorWheel(Widget):
'''Chromatic wheel for the ColorPicker.
.. versionchanged:: 1.7.1
`font_size`, `font_name` and `foreground_color` have been removed. The
sizing is now the same as other widgets, based on 'sp'. Orientation is
also automatically determined according to the width/height ratio.
'''
r = BoundedNumericProperty(0, min=0, max=1)
'''The Red value of the color currently selected.
:attr:`r` is a :class:`~kivy.properties.BoundedNumericProperty` and
can be a value from 0 to 1. It defaults to 0.
'''
g = BoundedNumericProperty(0, min=0, max=1)
'''The Green value of the color currently selected.
:attr:`g` is a :class:`~kivy.properties.BoundedNumericProperty`
and can be a value from 0 to 1.
'''
b = BoundedNumericProperty(0, min=0, max=1)
'''The Blue value of the color currently selected.
:attr:`b` is a :class:`~kivy.properties.BoundedNumericProperty` and
can be a value from 0 to 1.
'''
a = BoundedNumericProperty(0, min=0, max=1)
'''The Alpha value of the color currently selected.
:attr:`a` is a :class:`~kivy.properties.BoundedNumericProperty` and
can be a value from 0 to 1.
'''
color = ReferenceListProperty(r, g, b, a)
'''The holds the color currently selected.
:attr:`color` is a :class:`~kivy.properties.ReferenceListProperty` and
contains a list of `r`, `g`, `b`, `a` values.
'''
_origin = ListProperty((100, 100))
_radius = NumericProperty(100)
_piece_divisions = NumericProperty(10)
_pieces_of_pie = NumericProperty(16)
_inertia_slowdown = 1.25
_inertia_cutoff = .25
_num_touches = 0
_pinch_flag = False
_hsv = ListProperty([1, 1, 1, 0])
def __init__(self, **kwargs):
super(ColorWheel, self).__init__(**kwargs)
pdv = self._piece_divisions
self.sv_s = [(float(x) / pdv, 1) for x in range(pdv)] + [
(1, float(y) / pdv) for y in reversed(range(pdv))]
def on__origin(self, instance, value):
self.init_wheel(None)
def on__radius(self, instance, value):
self.init_wheel(None)
def init_wheel(self, dt):
# initialize list to hold all meshes
self.canvas.clear()
self.arcs = []
self.sv_idx = 0
pdv = self._piece_divisions
ppie = self._pieces_of_pie
for r in range(pdv):
for t in range(ppie):
self.arcs.append(
_ColorArc(
self._radius * (float(r) / float(pdv)),
self._radius * (float(r + 1) / float(pdv)),
2 * pi * (float(t) / float(ppie)),
2 * pi * (float(t + 1) / float(ppie)),
origin=self._origin,
color=(float(t) / ppie,
self.sv_s[self.sv_idx + r][0],
self.sv_s[self.sv_idx + r][1],
1)))
self.canvas.add(self.arcs[-1])
def recolor_wheel(self):
ppie = self._pieces_of_pie
for idx, segment in enumerate(self.arcs):
segment.change_color(
sv=self.sv_s[int(self.sv_idx + idx / ppie)])
def change_alpha(self, val):
for idx, segment in enumerate(self.arcs):
segment.change_color(a=val)
def inertial_incr_sv_idx(self, dt):
# if its already zoomed all the way out, cancel the inertial zoom
if self.sv_idx == len(self.sv_s) - self._piece_divisions:
return False
self.sv_idx += 1
self.recolor_wheel()
if dt * self._inertia_slowdown > self._inertia_cutoff:
return False
else:
Clock.schedule_once(self.inertial_incr_sv_idx,
dt * self._inertia_slowdown)
def inertial_decr_sv_idx(self, dt):
# if its already zoomed all the way in, cancel the inertial zoom
if self.sv_idx == 0:
return False
self.sv_idx -= 1
self.recolor_wheel()
if dt * self._inertia_slowdown > self._inertia_cutoff:
return False
else:
Clock.schedule_once(self.inertial_decr_sv_idx,
dt * self._inertia_slowdown)
def on_touch_down(self, touch):
r = self._get_touch_r(touch.pos)
if r > self._radius:
return False
# code is still set up to allow pinch to zoom, but this is
# disabled for now since it was fiddly with small wheels.
# Comment out these lines and adjust on_touch_move to reenable
# this.
if self._num_touches != 0:
return False
touch.grab(self)
self._num_touches += 1
touch.ud['anchor_r'] = r
touch.ud['orig_sv_idx'] = self.sv_idx
touch.ud['orig_time'] = Clock.get_time()
def on_touch_move(self, touch):
if touch.grab_current is not self:
return
r = self._get_touch_r(touch.pos)
goal_sv_idx = (touch.ud['orig_sv_idx'] -
int((r - touch.ud['anchor_r']) /
(float(self._radius) / self._piece_divisions)))
if (
goal_sv_idx != self.sv_idx and
goal_sv_idx >= 0 and
goal_sv_idx <= len(self.sv_s) - self._piece_divisions
):
# this is a pinch to zoom
self._pinch_flag = True
self.sv_idx = goal_sv_idx
self.recolor_wheel()
def on_touch_up(self, touch):
if touch.grab_current is not self:
return
touch.ungrab(self)
self._num_touches -= 1
if self._pinch_flag:
if self._num_touches == 0:
# user was pinching, and now both fingers are up. Return
# to normal
if self.sv_idx > touch.ud['orig_sv_idx']:
Clock.schedule_once(
self.inertial_incr_sv_idx,
(Clock.get_time() - touch.ud['orig_time']) /
(self.sv_idx - touch.ud['orig_sv_idx']))
if self.sv_idx < touch.ud['orig_sv_idx']:
Clock.schedule_once(
self.inertial_decr_sv_idx,
(Clock.get_time() - touch.ud['orig_time']) /
(self.sv_idx - touch.ud['orig_sv_idx']))
self._pinch_flag = False
return
else:
# user was pinching, and at least one finger remains. We
# don't want to treat the remaining fingers as touches
return
else:
r, theta = rect_to_polar(self._origin, *touch.pos)
# if touch up is outside the wheel, ignore
if r >= self._radius:
return
# compute which ColorArc is being touched (they aren't
# widgets so we don't get collide_point) and set
# _hsv based on the selected ColorArc
piece = int((theta / (2 * pi)) * self._pieces_of_pie)
division = int((r / self._radius) * self._piece_divisions)
hsva = list(
self.arcs[self._pieces_of_pie * division + piece].color)
self.color = list(hsv_to_rgb(*hsva[:3])) + hsva[-1:]
def _get_touch_r(self, pos):
return distance(pos, self._origin)
class _ColorArc(InstructionGroup):
def __init__(self, r_min, r_max, theta_min, theta_max,
color=(0, 0, 1, 1), origin=(0, 0), **kwargs):
super(_ColorArc, self).__init__(**kwargs)
self.origin = origin
self.r_min = r_min
self.r_max = r_max
self.theta_min = theta_min
self.theta_max = theta_max
self.color = color
self.color_instr = Color(*color, mode='hsv')
self.add(self.color_instr)
self.mesh = self.get_mesh()
self.add(self.mesh)
def __str__(self):
return "r_min: %s r_max: %s theta_min: %s theta_max: %s color: %s" % (
self.r_min, self.r_max, self.theta_min, self.theta_max, self.color
)
def get_mesh(self):
v = []
# first calculate the distance between endpoints of the outer
# arc, so we know how many steps to use when calculating
# vertices
theta_step_outer = 0.1
theta = self.theta_max - self.theta_min
d_outer = int(theta / theta_step_outer)
theta_step_outer = theta / d_outer
if self.r_min == 0:
for x in range(0, d_outer, 2):
v += (polar_to_rect(self.origin, self.r_max,
self.theta_min + x * theta_step_outer
) * 2)
v += polar_to_rect(self.origin, 0, 0) * 2
v += (polar_to_rect(self.origin, self.r_max,
self.theta_min + (x + 1) * theta_step_outer
) * 2)
if not d_outer & 1: # add a last point if d_outer is even
v += (polar_to_rect(self.origin, self.r_max,
self.theta_min + d_outer * theta_step_outer
) * 2)
else:
for x in range(d_outer + 1):
v += (polar_to_rect(self.origin, self.r_min,
self.theta_min + x * theta_step_outer
) * 2)
v += (polar_to_rect(self.origin, self.r_max,
self.theta_min + x * theta_step_outer
) * 2)
return Mesh(vertices=v, indices=range(int(len(v) / 4)),
mode='triangle_strip')
def change_color(self, color=None, color_delta=None, sv=None, a=None):
self.remove(self.color_instr)
if color is not None:
self.color = color
elif color_delta is not None:
self.color = [self.color[i] + color_delta[i] for i in range(4)]
elif sv is not None:
self.color = (self.color[0], sv[0], sv[1], self.color[3])
elif a is not None:
self.color = (self.color[0], self.color[1], self.color[2], a)
self.color_instr = Color(*self.color, mode='hsv')
self.insert(0, self.color_instr)
class ColorPicker(RelativeLayout):
'''
See module documentation.
'''
font_name = StringProperty('data/fonts/RobotoMono-Regular.ttf')
'''Specifies the font used on the ColorPicker.
:attr:`font_name` is a :class:`~kivy.properties.StringProperty` and
defaults to 'data/fonts/RobotoMono-Regular.ttf'.
'''
color = ListProperty((1, 1, 1, 1))
'''The :attr:`color` holds the color currently selected in rgba format.
:attr:`color` is a :class:`~kivy.properties.ListProperty` and defaults to
(1, 1, 1, 1).
'''
def _get_hsv(self):
return rgb_to_hsv(*self.color[:3])
def _set_hsv(self, value):
if self._updating_clr:
return
self.set_color(value)
hsv = AliasProperty(_get_hsv, _set_hsv, bind=('color', ))
'''The :attr:`hsv` holds the color currently selected in hsv format.
:attr:`hsv` is a :class:`~kivy.properties.ListProperty` and defaults to
(1, 1, 1).
'''
def _get_hex(self):
return get_hex_from_color(self.color)
def _set_hex(self, value):
if self._updating_clr:
return
self.set_color(get_color_from_hex(value)[:4])
hex_color = AliasProperty(_get_hex, _set_hex, bind=('color',), cache=True)
'''The :attr:`hex_color` holds the currently selected color in hex.
:attr:`hex_color` is an :class:`~kivy.properties.AliasProperty` and
defaults to `#ffffffff`.
'''
wheel = ObjectProperty(None)
'''The :attr:`wheel` holds the color wheel.
:attr:`wheel` is an :class:`~kivy.properties.ObjectProperty` and
defaults to None.
'''
_update_clr_ev = _update_hex_ev = None
# now used only internally.
foreground_color = ListProperty((1, 1, 1, 1))
def _trigger_update_clr(self, mode, clr_idx, text):
if self._updating_clr:
return
self._updating_clr = True
self._upd_clr_list = mode, clr_idx, text
ev = self._update_clr_ev
if ev is None:
ev = self._update_clr_ev = Clock.create_trigger(self._update_clr)
ev()
def _update_clr(self, dt):
# to prevent interaction between hsv/rgba, we work internally using rgba
mode, clr_idx, text = self._upd_clr_list
try:
text = min(255, max(0, float(text)))
if mode == 'rgb':
self.color[clr_idx] = float(text) / 255.
else:
hsv = list(self.hsv[:])
hsv[clr_idx] = float(text) / 255.
self.color[:3] = hsv_to_rgb(*hsv)
except ValueError:
Logger.warning('ColorPicker: invalid value : {}'.format(text))
finally:
self._updating_clr = False
def _update_hex(self, dt):
try:
if len(self._upd_hex_list) != 9:
return
self._updating_clr = False
self.hex_color = self._upd_hex_list
finally:
self._updating_clr = False
def _trigger_update_hex(self, text):
if self._updating_clr:
return
self._updating_clr = True
self._upd_hex_list = text
ev = self._update_hex_ev
if ev is None:
ev = self._update_hex_ev = Clock.create_trigger(self._update_hex)
ev()
def set_color(self, color):
self._updating_clr = True
if len(color) == 3:
self.color[:3] = color
else:
self.color = color
self._updating_clr = False
def __init__(self, **kwargs):
self._updating_clr = False
super(ColorPicker, self).__init__(**kwargs)
if __name__ in ('__android__', '__main__'):
from kivy.app import App
class ColorPickerApp(App):
def build(self):
cp = ColorPicker(pos_hint={'center_x': .5, 'center_y': .5},
size_hint=(1, 1))
return cp
ColorPickerApp().run()
| mit | -5,219,930,589,021,605,000 | 32.789691 | 79 | 0.538809 | false |
alhashash/odoomrp-wip | mrp_operations_time_control/models/operation_time.py | 16 | 3343 | # -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from openerp import api, models, fields
class MrpProductionWorkcenterLine(models.Model):
_inherit = 'mrp.production.workcenter.line'
operation_time_lines = fields.One2many('operation.time.line',
'operation_time',
string='Operation Time Lines')
def _create_operation_line(self):
self.env['operation.time.line'].create({
'start_date': fields.Datetime.now(),
'operation_time': self.id,
'user': self.env.uid})
def _write_end_date_operation_line(self):
self.operation_time_lines[-1].end_date = fields.Datetime.now()
def action_start_working(self):
result = super(MrpProductionWorkcenterLine,
self).action_start_working()
self._create_operation_line()
return result
def action_pause(self):
result = super(MrpProductionWorkcenterLine, self).action_pause()
self._write_end_date_operation_line()
return result
def action_resume(self):
result = super(MrpProductionWorkcenterLine, self).action_resume()
self._create_operation_line()
return result
def action_done(self):
result = super(MrpProductionWorkcenterLine, self).action_done()
self._write_end_date_operation_line()
return result
class OperationTimeLine(models.Model):
_name = 'operation.time.line'
_rec_name = 'operation_time'
def _default_user(self):
return self.env.uid
start_date = fields.Datetime(string='Start Date')
end_date = fields.Datetime(string='End Date')
operation_time = fields.Many2one('mrp.production.workcenter.line')
uptime = fields.Float(string='Uptime', compute='operation_uptime',
store=True, digits=(12, 6))
production = fields.Many2one('mrp.production',
related='operation_time.production_id',
string='Production', store=True)
user = fields.Many2one('res.users', string='User', default=_default_user)
@api.one
@api.depends('start_date', 'end_date')
def operation_uptime(self):
if self.end_date and self.start_date:
timedelta = fields.Datetime.from_string(self.end_date) - \
fields.Datetime.from_string(self.start_date)
self.uptime = timedelta.total_seconds() / 3600.
else:
self.uptime = 0
| agpl-3.0 | -1,394,578,828,778,536,400 | 37.872093 | 78 | 0.602453 | false |
milfeld/lammps | python/examples/mc.py | 15 | 2535 | #!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# mc.py
# Purpose: mimic operation of example/MC/in.mc via Python
# Syntax: mc.py in.mc
# in.mc = LAMMPS input script
import sys,random,math
# set these parameters
# make sure neigh skin (in in.mc) > 2*deltamove
nloop = 3000
deltaperturb = 0.2
deltamove = 0.1
kT = 0.05
random.seed(27848)
# parse command line
argv = sys.argv
if len(argv) != 2:
print "Syntax: mc.py in.mc"
sys.exit()
infile = sys.argv[1]
from lammps import lammps
lmp = lammps()
# run infile one line at a time
# just sets up MC problem
lines = open(infile,'r').readlines()
for line in lines: lmp.command(line)
lmp.command("variable e equal pe")
# run 0 to get energy of perfect lattice
# emin = minimum energy
lmp.command("run 0")
natoms = lmp.extract_global("natoms",0)
emin = lmp.extract_compute("thermo_pe",0,0) / natoms
lmp.command("variable emin equal $e")
# disorder the system
# estart = initial energy
x = lmp.extract_atom("x",3)
for i in xrange(natoms):
x[i][0] += deltaperturb * (2*random.random()-1)
x[i][1] += deltaperturb * (2*random.random()-1)
lmp.command("variable elast equal $e")
lmp.command("thermo_style custom step v_emin v_elast pe")
lmp.command("run 0")
x = lmp.extract_atom("x",3)
lmp.command("variable elast equal $e")
estart = lmp.extract_compute("thermo_pe",0,0) / natoms
# loop over Monte Carlo moves
# extract x after every run, in case reneighboring changed ptr in LAMMPS
elast = estart
naccept = 0
for i in xrange(nloop):
iatom = random.randrange(0,natoms)
x0 = x[iatom][0]
y0 = x[iatom][1]
x[iatom][0] += deltamove * (2*random.random()-1)
x[iatom][1] += deltamove * (2*random.random()-1)
lmp.command("run 1 pre no post no")
x = lmp.extract_atom("x",3)
e = lmp.extract_compute("thermo_pe",0,0) / natoms
if e <= elast:
elast = e
lmp.command("variable elast equal $e")
naccept += 1
elif random.random() <= math.exp(natoms*(elast-e)/kT):
elast = e
lmp.command("variable elast equal $e")
naccept += 1
else:
x[iatom][0] = x0
x[iatom][1] = y0
# final energy and stats
lmp.command("variable nbuild equal nbuild")
nbuild = lmp.extract_variable("nbuild",None,0)
lmp.command("run 0")
estop = lmp.extract_compute("thermo_pe",0,0) / natoms
print "MC stats:"
print " starting energy =",estart
print " final energy =",estop
print " minimum energy of perfect lattice =",emin
print " accepted MC moves =",naccept
print " neighbor list rebuilds =",nbuild
| gpl-2.0 | -400,698,895,635,863,000 | 22.472222 | 72 | 0.672978 | false |
linglung/ytdl | youtube_dl/extractor/morningstar.py | 220 | 1732 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
class MorningstarIE(InfoExtractor):
IE_DESC = 'morningstar.com'
_VALID_URL = r'https?://(?:www\.)?morningstar\.com/[cC]over/video[cC]enter\.aspx\?id=(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.morningstar.com/cover/videocenter.aspx?id=615869',
'md5': '6c0acface7a787aadc8391e4bbf7b0f5',
'info_dict': {
'id': '615869',
'ext': 'mp4',
'title': 'Get Ahead of the Curve on 2013 Taxes',
'description': "Vanguard's Joel Dickson on managing higher tax rates for high-income earners and fund capital-gain distributions in 2013.",
'thumbnail': r're:^https?://.*m(?:orning)?star\.com/.+thumb\.jpg$'
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(url, video_id)
title = self._html_search_regex(
r'<h1 id="titleLink">(.*?)</h1>', webpage, 'title')
video_url = self._html_search_regex(
r'<input type="hidden" id="hidVideoUrl" value="([^"]+)"',
webpage, 'video URL')
thumbnail = self._html_search_regex(
r'<input type="hidden" id="hidSnapshot" value="([^"]+)"',
webpage, 'thumbnail', fatal=False)
description = self._html_search_regex(
r'<div id="mstarDeck".*?>(.*?)</div>',
webpage, 'description', fatal=False)
return {
'id': video_id,
'title': title,
'url': video_url,
'thumbnail': thumbnail,
'description': description,
}
| unlicense | -8,856,469,103,416,465,000 | 35.851064 | 151 | 0.551963 | false |
defionscode/ansible | test/units/modules/network/slxos/test_slxos_config.py | 30 | 8136 | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.slxos import slxos_config
from units.modules.utils import set_module_args
from .slxos_module import TestSlxosModule, load_fixture
class TestSlxosConfigModule(TestSlxosModule):
module = slxos_config
def setUp(self):
super(TestSlxosConfigModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.slxos.slxos_config.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.slxos.slxos_config.load_config')
self.load_config = self.mock_load_config.start()
self.mock_run_commands = patch('ansible.modules.network.slxos.slxos_config.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestSlxosConfigModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
config_file = 'slxos_config_config.cfg'
self.get_config.return_value = load_fixture(config_file)
self.load_config.return_value = None
def test_slxos_config_unchanged(self):
src = load_fixture('slxos_config_config.cfg')
set_module_args(dict(src=src))
self.execute_module()
def test_slxos_config_src(self):
src = load_fixture('slxos_config_src.cfg')
set_module_args(dict(src=src))
commands = ['hostname foo', 'interface Ethernet 0/0',
'no ip address']
self.execute_module(changed=True, commands=commands)
def test_slxos_config_backup(self):
set_module_args(dict(backup=True))
result = self.execute_module()
self.assertIn('__backup__', result)
def test_slxos_config_save_always(self):
self.run_commands.return_value = "Hostname foo"
set_module_args(dict(save_when='always'))
self.execute_module(changed=True)
self.assertEqual(self.run_commands.call_count, 1)
self.assertEqual(self.get_config.call_count, 0)
self.assertEqual(self.load_config.call_count, 0)
args = self.run_commands.call_args[0][1]
self.assertIn('copy running-config startup-config', args['command'])
def test_slxos_config_save_changed_true(self):
src = load_fixture('slxos_config_src.cfg')
set_module_args(dict(src=src, save_when='changed'))
commands = ['hostname foo', 'interface Ethernet 0/0', 'no ip address']
self.execute_module(changed=True, commands=commands)
self.assertEqual(self.run_commands.call_count, 1)
self.assertEqual(self.get_config.call_count, 1)
self.assertEqual(self.load_config.call_count, 1)
args = self.run_commands.call_args[0][1]
self.assertIn('copy running-config startup-config', args['command'])
def test_slxos_config_save_changed_false(self):
set_module_args(dict(save_when='changed'))
self.execute_module(changed=False)
self.assertEqual(self.run_commands.call_count, 0)
self.assertEqual(self.get_config.call_count, 0)
self.assertEqual(self.load_config.call_count, 0)
def test_slxos_config_lines_wo_parents(self):
set_module_args(dict(lines=['hostname foo']))
commands = ['hostname foo']
self.execute_module(changed=True, commands=commands)
def test_slxos_config_lines_w_parents(self):
set_module_args(dict(lines=['shutdown'], parents=['interface Ethernet 0/0']))
commands = ['interface Ethernet 0/0', 'shutdown']
self.execute_module(changed=True, commands=commands)
def test_slxos_config_before(self):
set_module_args(dict(lines=['hostname foo'], before=['test1', 'test2']))
commands = ['test1', 'test2', 'hostname foo']
self.execute_module(changed=True, commands=commands, sort=False)
def test_slxos_config_after(self):
set_module_args(dict(lines=['hostname foo'], after=['test1', 'test2']))
commands = ['hostname foo', 'test1', 'test2']
self.execute_module(changed=True, commands=commands, sort=False)
def test_slxos_config_before_after_no_change(self):
set_module_args(dict(lines=['hostname router'],
before=['test1', 'test2'],
after=['test3', 'test4']))
self.execute_module()
def test_slxos_config_config(self):
config = 'hostname localhost'
set_module_args(dict(lines=['hostname router'], config=config))
commands = ['hostname router']
self.execute_module(changed=True, commands=commands)
def test_slxos_config_replace_block(self):
lines = ['description test string', 'test string']
parents = ['interface Ethernet 0/0']
set_module_args(dict(lines=lines, replace='block', parents=parents))
commands = parents + lines
self.execute_module(changed=True, commands=commands)
def test_slxos_config_match_none(self):
lines = ['hostname router']
set_module_args(dict(lines=lines, match='none'))
self.execute_module(changed=True, commands=lines)
def test_slxos_config_match_none_parents(self):
lines = ['ip address 1.2.3.4 255.255.255.0', 'description test string']
parents = ['interface Ethernet 0/0']
set_module_args(dict(lines=lines, parents=parents, match='none'))
commands = parents + lines
self.execute_module(changed=True, commands=commands, sort=False)
def test_slxos_config_match_strict(self):
lines = ['ip address 1.2.3.4 255.255.255.0', 'description test string',
'shutdown']
parents = ['interface Ethernet 0/0']
set_module_args(dict(lines=lines, parents=parents, match='strict'))
commands = parents + ['shutdown']
self.execute_module(changed=True, commands=commands, sort=False)
def test_slxos_config_match_exact(self):
lines = ['ip address 1.2.3.4 255.255.255.0', 'description test string',
'shutdown']
parents = ['interface Ethernet 0/0']
set_module_args(dict(lines=lines, parents=parents, match='exact'))
commands = parents + lines
self.execute_module(changed=True, commands=commands, sort=False)
def test_slxos_config_src_and_lines_fails(self):
args = dict(src='foo', lines='foo')
set_module_args(args)
self.execute_module(failed=True)
def test_slxos_config_src_and_parents_fails(self):
args = dict(src='foo', parents='foo')
set_module_args(args)
self.execute_module(failed=True)
def test_slxos_config_match_exact_requires_lines(self):
args = dict(match='exact')
set_module_args(args)
self.execute_module(failed=True)
def test_slxos_config_match_strict_requires_lines(self):
args = dict(match='strict')
set_module_args(args)
self.execute_module(failed=True)
def test_slxos_config_replace_block_requires_lines(self):
args = dict(replace='block')
set_module_args(args)
self.execute_module(failed=True)
def test_slxos_config_replace_config_requires_src(self):
args = dict(replace='config')
set_module_args(args)
self.execute_module(failed=True)
| gpl-3.0 | -2,331,172,785,893,253,600 | 40.723077 | 97 | 0.658555 | false |
felipedau/pyaxo | examples/axotor.py | 2 | 24592 | #!/usr/bin/env python
import socket
import threading
import sys
import os
import curses
import socks
import stem.process
from wh import WHMgr
import random
from binascii import a2b_base64 as a2b
from binascii import b2a_base64 as b2a
from getpass import getpass
from smp import SMP
from stem.control import Controller
from stem.util import term
from curses.textpad import Textbox
from contextlib import contextmanager
from pyaxo import Axolotl, hash_
from time import sleep
"""
Standalone chat script using libsodium for encryption with the Axolotl
ratchet for key management.
This version of the chat client makes connections over the tor network.
The server creates an ephemeral hidden service and the client connects
to the hidden service. You will need to load the following additional
python modules for this to work: stem, pysocks, txtorcon, pysocks,
and magic-wormhole. They are available on pypi via pip.
Axotor also requires tor (>=2.9.1). Currently this is in the unstable
branch. So you may need to update your distribution's tor repository
accordingly.
The only inputs required are the nicks of the conversants, as well as
a master key. The master key must be of the form N-x where N is an
integer (1-3 digits should be fine) and x is a lower-case alphanumeric
string. The initial axolotl database configuration and credential
exchange are derived from the master key. The master key should be
exchanged out-of-band between the two conversants before communication
can be established. An example master key might be 293-xyzzy (don't use
this one).
On start, the program initializes a tor server and exchanges axolotl
and hidden service authentication credentials over tor using PAKE
(password-authenticated key exchange). The specific implementation of
PAKE used is https://github.com/warner/magic-wormhole. The server
side creates an ephemeral hidden service that requires basic_auth
to connect.
The hidden service and axolotl credentials are ephemeral. They exist
only in ram, and will be deleted upon exit from the program. Exit by
typing .quit at the chat prompt.
The client also does an authentication step using the Socialist
Millionaire's Protocol. During startup, after a network connection is
established, you will be prompted for a secret. If the secret
matches that input by the other party, a chat is established and
the input window text appears in green. If the secret does not match
the other party's secret, you will be prompted whether or not to
continue. If you continue, the input window text will appear in red
to remind you that the session is unauthenticated.
The Axolotl protocol is actually authenticated through the key
agreement process when the credentials are created. You may wonder why
the additional SMP authentication step is included. This SMP step
is another way to assure you that the other party to your session is
actually who you think it is.
In axotor.py, no database or key is ever stored to disk.
Usage:
1. One side starts the server with:
axotor.py -s
2. The other side connects the client to the server with:
axotor.py -c
3. .quit at the chat prompt will quit (don't forget the "dot")
4. .send <filename> will send a file to the other party. The file
can be from anywhere in the filesystem on the sending computer
(~/a/b/<filename> supported) and will be stored in the receiver's
local directory.
Axotor requires the Axolotl module at https://github.com/rxcomm/pyaxo
Copyright (C) 2015-2017 by David R. Andersen <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
TOR_SERVER_PORT = 9054
TOR_SERVER_CONTROL_PORT = 9055
TOR_CLIENT_PORT = 9154
TOR_CLIENT_CONTROL_PORT = 9155
CLIENT_FILE_TX_PORT = 2000
SERVER_FILE_TX_PORT = 2001
# An attempt to limit the damage from this bug in curses:
# https://bugs.python.org/issue13051
# The input textbox is 8 rows high. So assuming a maximum
# terminal width of 512 columns, we arrive at 8x512=4096.
# Most terminal windows should be smaller than this.
sys.setrecursionlimit(4096)
@contextmanager
def socketcontext(*args, **kwargs):
s = socket.socket(*args, **kwargs)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
yield s
s.close()
@contextmanager
def torcontext():
try:
s = socks.socksocket()
s.set_proxy(socks.SOCKS5, '127.0.0.1', TOR_CLIENT_PORT)
yield s
s.close()
except socks.SOCKS5Error:
print ''
print 'You need to wait long enough for the Hidden Service'
print 'at the server to be established. Try again in a'
print 'minute or two.'
class _Textbox(Textbox):
"""
curses.textpad.Textbox requires users to type ^g on completion, which is sort
of annoying for an interactive chat client such as this, which typically only
requires an Enter. This subclass fixes this problem by signalling completion
on Enter as well as ^g. Also, map <Backspace> key to ^h.
"""
def __init__(*args, **kwargs):
Textbox.__init__(*args, **kwargs)
def do_command(self, ch):
if ch == curses.KEY_RESIZE:
resizeWindows()
for i in range(8): # delete 8 input window lines
Textbox.do_command(self, 1)
Textbox.do_command(self, 11)
Textbox.do_command(self, 16)
return Textbox.do_command(self, 7)
if ch == 10: # Enter
return 0
if ch == 127: # Backspace
return 8
return Textbox.do_command(self, ch)
def validator(ch):
"""
Update screen if necessary and release the lock so receiveThread can run
"""
global screen_needs_update
try:
if screen_needs_update:
curses.doupdate()
screen_needs_update = False
return ch
finally:
winlock.release()
sleep(0.01) # let receiveThread in if necessary
winlock.acquire()
def windowFactory():
stdscr = curses.initscr()
curses.noecho()
curses.start_color()
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_RED, -1)
curses.init_pair(2, curses.COLOR_GREEN, -1)
curses.init_pair(3, curses.COLOR_YELLOW, -1)
curses.cbreak()
curses.curs_set(1)
(sizey, sizex) = stdscr.getmaxyx()
input_win = curses.newwin(8, sizex, sizey-8, 0)
output_win = curses.newwin(sizey-8, sizex, 0, 0)
input_win.idlok(1)
input_win.scrollok(1)
input_win.nodelay(1)
input_win.leaveok(0)
input_win.timeout(100)
output_win.idlok(1)
output_win.scrollok(1)
output_win.leaveok(0)
return stdscr, input_win, output_win
def closeWindows(stdscr):
curses.nocbreak()
stdscr.keypad(0)
curses.echo()
curses.endwin()
def resizeWindows():
global stdscr, input_win, output_win, textpad, text_color
temp_win = output_win
yold, xold = output_win.getmaxyx()
stdscr, input_win, output_win = windowFactory()
stdscr.noutrefresh()
ynew, xnew = output_win.getmaxyx()
if yold > ynew:
sminrow = yold - ynew
dminrow = 0
else:
sminrow = 0
dminrow = ynew - yold
temp_win.overwrite(output_win, sminrow, 0, dminrow, 0, ynew-1, xnew-1)
del temp_win
output_win.move(ynew-1, 0)
output_win.noutrefresh()
input_win.attron(text_color)
input_win.noutrefresh()
curses.doupdate()
textpad = _Textbox(input_win, insert_mode=True)
textpad.stripspaces = True
def usage():
print 'Usage: ' + sys.argv[0] + ' -(s,c)'
print ' -s: start a chat in server mode'
print ' -c: start a chat in client mode'
print 'quit with .quit'
print 'send file with .send <filename>'
sys.exit(1)
def reportTransferSocketError():
global output_win
with winlock:
output_win.addstr('Socket error: Something went wrong.\n',
curses.color_pair(1))
output_win.refresh()
sys.exit()
def sendFile(s, filename, abort):
global axolotl, output_win
if abort:
with cryptlock:
data = axolotl.encrypt('ABORT') + 'EOP'
s.send(data)
try:
s.recv(3, socket.MSG_WAITALL)
except socket.error:
pass
sys.exit()
else:
with winlock:
output_win.addstr('Sending file %s...\n' % filename,
curses.color_pair(3))
output_win.refresh()
with open(filename, 'rb') as f:
data = f.read()
if len(data) == 0:
data = 'Sender tried to send a null file!'
with cryptlock:
data = axolotl.encrypt(data)
s.send(data + 'EOP')
try:
s.recv(3, socket.MSG_WAITALL)
except socket.error:
pass
def receiveFile(s, filename):
global axolotl, output_win
data = ''
while data[-3:] != 'EOP':
rcv = s.recv(4096)
data = data + rcv
if not rcv:
with winlock:
output_win.addstr('Receiving %s aborted...\n' % filename,
curses.color_pair(1))
output_win.refresh()
try:
s.send('EOP')
except socket.error:
pass
sys.exit()
with cryptlock:
data = axolotl.decrypt(data[:-3])
if data == 'ABORT':
with winlock:
output_win.addstr('Receiving %s aborted...\n' % filename,
curses.color_pair(1))
output_win.refresh()
sys.exit()
with open(filename, 'wb') as f:
f.write(data)
try:
s.send('EOP')
except socket.error:
pass
def uploadThread(onion, command):
global output_win
with transferlock:
filename = command.split(':> .send ')[1].strip()
filename = os.path.expanduser(filename)
if not os.path.exists(filename) or os.path.isdir(filename):
with winlock:
output_win.addstr('File %s does not exist...\n' % filename,
curses.color_pair(1))
output_win.refresh()
abort = True
else:
abort = False
if onion is None:
with socketcontext(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
s.bind(('localhost', SERVER_FILE_TX_PORT))
s.listen(1)
conn, addr = s.accept()
except socket.error:
reportTransferSocketError()
sendFile(conn, filename, abort)
else:
with torcontext() as s:
try:
s.connect((onion, CLIENT_FILE_TX_PORT))
except socket.error:
reportTransferSocketError()
sendFile(s, filename, abort)
with winlock:
output_win.addstr('%s sent...\n' % filename, curses.color_pair(3))
output_win.refresh()
def downloadThread(onion, command):
global output_win
with transferlock:
filename = command.split(':> .send ')[1].strip().split('/').pop()
with winlock:
output_win.addstr('Receiving file %s...\n' % filename,
curses.color_pair(3))
output_win.refresh()
if onion is None:
with socketcontext(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
s.bind(('localhost', CLIENT_FILE_TX_PORT))
s.listen(1)
conn, addr = s.accept()
except socket.error:
reportTransferSocketError()
receiveFile(conn, filename)
else:
with torcontext() as s:
try:
s.connect((onion, SERVER_FILE_TX_PORT))
except socket.error:
reportTransferSocketError()
receiveFile(s, filename)
with winlock:
output_win.addstr('%s received...\n' % filename, curses.color_pair(3))
output_win.refresh()
def receiveThread(sock, text_color, onion):
global screen_needs_update, a, stdscr, input_win, output_win
while True:
data = ''
while data[-3:] != 'EOP':
rcv = sock.recv(1024)
if not rcv:
input_win.move(0, 0)
input_win.addstr('Disconnected - Ctrl-C to exit!',
text_color)
input_win.refresh()
sys.exit()
data = data + rcv
data_list = data.split('EOP')
with winlock:
(cursory, cursorx) = input_win.getyx()
for data in data_list:
if data != '':
with cryptlock:
data = axolotl.decrypt(data)
if ':> .quit' in data:
closeWindows(stdscr)
print 'The other party exited the chat...'
sleep(1.5)
os._exit(0)
if ':> .send ' in data:
t = threading.Thread(target=downloadThread,
args=(onion,data))
t.start()
data = ''
output_win.addstr(data)
input_win.move(cursory, cursorx)
input_win.cursyncup()
input_win.noutrefresh()
output_win.noutrefresh()
screen_needs_update = True
def chatThread(sock, smp_match, onion):
global screen_needs_update, axolotl, stdscr, input_win, \
output_win, textpad, text_color
stdscr, input_win, output_win = windowFactory()
y, x = output_win.getmaxyx()
output_win.move(y-1, 0)
if smp_match:
text_color = curses.color_pair(2) # green
else:
text_color = curses.color_pair(1) # red
input_win.attron(text_color)
input_win.addstr(0, 0, NICK + ':> ')
textpad = _Textbox(input_win, insert_mode=True)
textpad.stripspaces = True
t = threading.Thread(target=receiveThread,
args=(sock, text_color, onion))
t.daemon = True
t.start()
try:
while True:
with winlock:
data = textpad.edit(validator)
if len(data) != 0 and chr(127) not in data:
input_win.clear()
input_win.addstr(NICK+':> ')
output_win.addstr(data.replace('\n', '') + '\n', text_color)
output_win.noutrefresh()
input_win.move(0, len(NICK)+3)
input_win.cursyncup()
input_win.noutrefresh()
screen_needs_update = True
data = data.replace('\n', '') + '\n'
try:
with cryptlock:
sock.send(axolotl.encrypt(data) + 'EOP')
except socket.error:
input_win.addstr('Disconnected')
input_win.refresh()
closeWindows(stdscr)
sys.exit()
if NICK+':> .quit' in data:
closeWindows(stdscr)
print 'Notifying the other party that you are quitting...'
sys.exit()
elif NICK+':> .send ' in data:
t = threading.Thread(target=uploadThread,
args=(onion,data))
t.start()
else:
input_win.addstr(NICK+':> ')
input_win.move(0, len(NICK)+3)
input_win.cursyncup()
input_win.noutrefresh()
screen_needs_update = True
if screen_needs_update:
curses.doupdate()
screen_needs_update = False
except KeyboardInterrupt:
closeWindows(stdscr)
def tor(port, controlport, tor_dir, descriptor_cookie):
tor_process = stem.process.launch_tor_with_config(
tor_cmd = 'tor',
config = {
'ControlPort': str(controlport),
'SocksPort' : str(port),
'Log' : [ 'NOTICE stdout', 'ERR file /tmp/tor_error_log', ],
'DataDirectory' : tor_dir,
'CookieAuthentication': '1',
'HidServAuth': descriptor_cookie,
},
completion_percent = 100,
take_ownership = True,
timeout = 90,
init_msg_handler = print_bootstrap_lines,
)
return tor_process
def print_bootstrap_lines(line):
if 'Bootstrapped ' in line:
print(term.format(line, term.Color.RED))
def clientController(descriptor_cookie, onion):
controller = Controller.from_port(address='127.0.0.1',
port=TOR_CLIENT_CONTROL_PORT)
try:
controller.authenticate()
controller.set_options([
('HidServAuth', onion + ' ' + descriptor_cookie),
])
except Exception as e:
print e
return controller
def ephemeralHiddenService():
PORT = 50000
HOST = '127.0.0.1'
print 'Waiting for Hidden Service descriptor to be published...'
print ' (this may take some time)'
controller = Controller.from_port(address='127.0.0.1',
port=TOR_SERVER_CONTROL_PORT)
controller.authenticate()
try:
hs = controller.create_ephemeral_hidden_service([PORT,
CLIENT_FILE_TX_PORT,
SERVER_FILE_TX_PORT],
basic_auth={'axotor': None},
await_publication=True)
except Exception as e:
print e
return controller, hs.client_auth['axotor'], hs.service_id +'.onion'
def credentialsSend(mkey, cookie, ratchet_key, onion):
w = WHMgr(unicode(mkey),
cookie+'___'+ratchet_key+'___'+onion,
u'tcp:127.0.0.1:'+unicode(TOR_SERVER_CONTROL_PORT))
w.send()
w.run()
return w.confirmed
def credentialsReceive(mkey):
w = WHMgr(unicode(mkey), None, u'tcp:127.0.0.1:'+unicode(TOR_CLIENT_CONTROL_PORT))
w.receive()
w.run()
return w.data
def smptest(secret, sock, is_server):
global axolotl
# Create an SMP object with the calculated secret
smp = SMP(secret)
if is_server:
# Do the SMP protocol
buffer = sock.recv(2439, socket.MSG_WAITALL)
padlength = ord(buffer[-1:])
buffer = axolotl.decrypt(buffer[:-padlength])
buffer = smp.step2(buffer)
buffer = axolotl.encrypt(buffer)
padlength = 4539-len(buffer)
buffer = buffer+padlength*chr(padlength) # pad to fixed length
sock.send(buffer)
buffer = sock.recv(3469, socket.MSG_WAITALL)
padlength = ord(buffer[-1:])
buffer = axolotl.decrypt(buffer[:-padlength])
buffer = smp.step4(buffer)
buffer = axolotl.encrypt(buffer)
padlength = 1369-len(buffer)
buffer = buffer+padlength*chr(padlength) # pad to fixed length
sock.send(buffer)
else:
# Do the SMP protocol
buffer = smp.step1()
buffer = axolotl.encrypt(buffer)
padlength = 2439-len(buffer)
buffer = buffer+padlength*chr(padlength) # pad to fixed length
sock.send(buffer)
buffer = sock.recv(4539, socket.MSG_WAITALL)
padlength = ord(buffer[-1:])
buffer = axolotl.decrypt(buffer[:-padlength])
buffer = smp.step3(buffer)
buffer = axolotl.encrypt(buffer)
padlength = 3469-len(buffer)
buffer = buffer+padlength*chr(padlength) # pad to fixed length
sock.send(buffer)
buffer = sock.recv(1369, socket.MSG_WAITALL)
padlength = ord(buffer[-1:])
buffer = axolotl.decrypt(buffer[:-padlength])
smp.step5(buffer)
# Check if the secrets match
if smp.match:
print 'Secrets Match!'
sleep(1)
smp_match = True
else:
print 'Secrets DO NOT Match!'
smp_match = False
return smp_match
def doSMP(sock, is_server):
global axolotl
ans = raw_input('Run SMP authentication step? (y/N)? ')
if not ans == 'y': ans = 'N'
sock.send(axolotl.encrypt(ans))
data = sock.recv(125, socket.MSG_WAITALL)
data = axolotl.decrypt(data)
if ans == 'N' and data == 'y':
print 'Other party requested SMP authentication'
if ans == 'y' or data == 'y':
print 'Performing per-session SMP authentication...'
ans = getpass('Enter SMP secret: ')
print 'Running SMP protocol...'
secret = ans + axolotl.state['CONVid']
smp_match = smptest(secret, sock, is_server)
if not smp_match:
ans = raw_input('Continue? (y/N) ')
if ans != 'y':
print 'Exiting...'
sys.exit()
else:
print 'OK - skipping SMP step and assuming ' + \
'the other party is already authenticated...'
smp_match = True
sleep(2)
return smp_match
if __name__ == '__main__':
global axolotl
try:
mode = sys.argv[1]
except:
usage()
NICK = raw_input('Enter your nick: ')
OTHER_NICK = 'x'
winlock = threading.Lock()
transferlock = threading.Lock()
cryptlock = threading.Lock()
screen_needs_update = False
HOST = '127.0.0.1'
PORT=50000
mkey = getpass('What is the masterkey (format: NNN-xxxx)? ')
if mode == '-s':
axolotl = Axolotl(NICK,
dbname=OTHER_NICK+'.db',
dbpassphrase=None,
nonthreaded_sql=False)
axolotl.createState(other_name=OTHER_NICK,
mkey=hash_(mkey),
mode=False)
tor_process = tor(TOR_SERVER_PORT,
TOR_SERVER_CONTROL_PORT,
'/tmp/tor.server',
'')
hs, cookie, onion = ephemeralHiddenService()
print 'Exchanging credentials via tor...'
if credentialsSend(mkey,
cookie,
b2a(axolotl.state['DHRs']).strip(),
onion):
pass
else:
sys.exit(1)
print 'Credentials sent, waiting for the other party to connect...'
with socketcontext(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind((HOST, PORT))
s.listen(1)
conn, addr = s.accept()
print 'Connected...'
smp_match = doSMP(conn, True)
chatThread(conn, smp_match, None)
elif mode == '-c':
axolotl = Axolotl(NICK,
dbname=OTHER_NICK+'.db',
dbpassphrase=None,
nonthreaded_sql=False)
tor_process = tor(TOR_CLIENT_PORT,
TOR_CLIENT_CONTROL_PORT,
'/tmp/tor.client',
'')
print 'Exchanging credentials via tor...'
creds = credentialsReceive(mkey)
if not creds:
print 'Master Key Mismatch!'
print 'Exiting...'
sys.exit()
cookie, rkey, onion = creds.split('___')
controller = clientController(cookie, onion)
axolotl.createState(other_name=OTHER_NICK,
mkey=hash_(mkey),
mode=True,
other_ratchetKey=a2b(rkey))
print 'Credentials received, connecting to the other party...'
with torcontext() as s:
s.connect((onion, PORT))
print 'Connected...'
smp_match = doSMP(s, False)
chatThread(s, smp_match, onion)
else:
usage()
| gpl-3.0 | 474,111,654,373,516,160 | 34.486291 | 86 | 0.573073 | false |
kawamuray/ganeti | lib/watcher/state.py | 2 | 7572 | #
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Module keeping state for Ganeti watcher.
"""
import os
import time
import logging
from ganeti import utils
from ganeti import serializer
from ganeti import errors
# Delete any record that is older than 8 hours; this value is based on
# the fact that the current retry counter is 5, and watcher runs every
# 5 minutes, so it takes around half an hour to exceed the retry
# counter, so 8 hours (16*1/2h) seems like a reasonable reset time
RETRY_EXPIRATION = 8 * 3600
KEY_CLEANUP_COUNT = "cleanup_count"
KEY_CLEANUP_WHEN = "cleanup_when"
KEY_RESTART_COUNT = "restart_count"
KEY_RESTART_WHEN = "restart_when"
KEY_BOOT_ID = "bootid"
def OpenStateFile(path):
"""Opens the state file and acquires a lock on it.
@type path: string
@param path: Path to state file
"""
# The two-step dance below is necessary to allow both opening existing
# file read/write and creating if not existing. Vanilla open will truncate
# an existing file -or- allow creating if not existing.
statefile_fd = os.open(path, os.O_RDWR | os.O_CREAT)
# Try to acquire lock on state file. If this fails, another watcher instance
# might already be running or another program is temporarily blocking the
# watcher from running.
try:
utils.LockFile(statefile_fd)
except errors.LockError, err:
logging.error("Can't acquire lock on state file %s: %s", path, err)
return None
return os.fdopen(statefile_fd, "w+")
class WatcherState(object):
"""Interface to a state file recording restart attempts.
"""
def __init__(self, statefile):
"""Open, lock, read and parse the file.
@type statefile: file
@param statefile: State file object
"""
self.statefile = statefile
try:
state_data = self.statefile.read()
if not state_data:
self._data = {}
else:
self._data = serializer.Load(state_data)
except Exception, msg: # pylint: disable=W0703
# Ignore errors while loading the file and treat it as empty
self._data = {}
logging.warning(("Invalid state file. Using defaults."
" Error message: %s"), msg)
if "instance" not in self._data:
self._data["instance"] = {}
if "node" not in self._data:
self._data["node"] = {}
self._orig_data = serializer.Dump(self._data)
def Save(self, filename):
"""Save state to file, then unlock and close it.
"""
assert self.statefile
serialized_form = serializer.Dump(self._data)
if self._orig_data == serialized_form:
logging.debug("Data didn't change, just touching status file")
os.utime(filename, None)
return
# We need to make sure the file is locked before renaming it, otherwise
# starting ganeti-watcher again at the same time will create a conflict.
fd = utils.WriteFile(filename,
data=serialized_form,
prewrite=utils.LockFile, close=False)
self.statefile = os.fdopen(fd, "w+")
def Close(self):
"""Unlock configuration file and close it.
"""
assert self.statefile
# Files are automatically unlocked when closing them
self.statefile.close()
self.statefile = None
def GetNodeBootID(self, name):
"""Returns the last boot ID of a node or None.
"""
ndata = self._data["node"]
if name in ndata and KEY_BOOT_ID in ndata[name]:
return ndata[name][KEY_BOOT_ID]
return None
def SetNodeBootID(self, name, bootid):
"""Sets the boot ID of a node.
"""
assert bootid
ndata = self._data["node"]
ndata.setdefault(name, {})[KEY_BOOT_ID] = bootid
def NumberOfRestartAttempts(self, instance_name):
"""Returns number of previous restart attempts.
@type instance_name: string
@param instance_name: the name of the instance to look up
"""
idata = self._data["instance"]
if instance_name in idata:
return idata[instance_name][KEY_RESTART_COUNT]
return 0
def NumberOfCleanupAttempts(self, instance_name):
"""Returns number of previous cleanup attempts.
@type instance_name: string
@param instance_name: the name of the instance to look up
"""
idata = self._data["instance"]
if instance_name in idata:
return idata[instance_name][KEY_CLEANUP_COUNT]
return 0
def MaintainInstanceList(self, instances):
"""Perform maintenance on the recorded instances.
@type instances: list of string
@param instances: the list of currently existing instances
"""
idict = self._data["instance"]
# First, delete obsolete instances
obsolete_instances = set(idict).difference(instances)
for inst in obsolete_instances:
logging.debug("Forgetting obsolete instance %s", inst)
idict.pop(inst, None)
# Second, delete expired records
earliest = time.time() - RETRY_EXPIRATION
expired_instances = [i for i in idict
if idict[i][KEY_RESTART_WHEN] < earliest]
for inst in expired_instances:
logging.debug("Expiring record for instance %s", inst)
idict.pop(inst, None)
@staticmethod
def _RecordAttempt(instances, instance_name, key_when, key_count):
"""Record an event.
@type instances: dict
@param instances: contains instance data indexed by instance_name
@type instance_name: string
@param instance_name: name of the instance involved in the event
@type key_when: string
@param key_when: dict key for the information for when the event occurred
@type key_count: string
@param key_count: dict key for the information for how many times
the event occurred
"""
instance = instances.setdefault(instance_name, {})
instance[key_when] = time.time()
instance[key_count] = instance.get(key_count, 0) + 1
def RecordRestartAttempt(self, instance_name):
"""Record a restart attempt.
@type instance_name: string
@param instance_name: the name of the instance being restarted
"""
self._RecordAttempt(self._data["instance"], instance_name,
KEY_RESTART_WHEN, KEY_RESTART_COUNT)
def RecordCleanupAttempt(self, instance_name):
"""Record a cleanup attempt.
@type instance_name: string
@param instance_name: the name of the instance being cleaned up
"""
self._RecordAttempt(self._data["instance"], instance_name,
KEY_CLEANUP_WHEN, KEY_CLEANUP_COUNT)
def RemoveInstance(self, instance_name):
"""Update state to reflect that a machine is running.
This method removes the record for a named instance (as we only
track down instances).
@type instance_name: string
@param instance_name: the name of the instance to remove from books
"""
idata = self._data["instance"]
idata.pop(instance_name, None)
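# Minimal usage sketch (illustrative only; the state file path and instance
# name below are hypothetical):
#
#   statefile = OpenStateFile("/var/lib/ganeti/watcher.data")
#   if statefile:
#       state = WatcherState(statefile)
#       state.MaintainInstanceList(["inst1.example.com"])
#       state.RecordRestartAttempt("inst1.example.com")
#       state.Save("/var/lib/ganeti/watcher.data")
#       state.Close()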
| gpl-2.0 | -1,895,066,879,734,944,500 | 28.348837 | 78 | 0.67644 | false |
imclab/CoECI-CMS-Healthcare-Fraud-Prevention | partnerclient/decision_module/decision_module/helper.py | 3 | 1511 | '''
Copyright (C) 2013 TopCoder Inc., All Rights Reserved.
'''
'''
This is the module that provides useful helper functions.
Thread Safety:
The implementation is thread safe.
@author: TCSASSEMLBER
@version: 1.0
'''
def log_entrance(logger, signature, parasMap):
'''
Logs for entrance into public methods at DEBUG level.
@param logger: the logger object
@param signature: the method signature
@param parasMap: the passed parameters
'''
logger.debug('[Entering method ' + signature + ']')
if parasMap != None and len(parasMap.items()) > 0:
paraStr = '[Input parameters['
for (k,v) in parasMap.items():
paraStr += (str(k) + ':' + str(v) + ', ')
paraStr += ']]'
logger.debug(paraStr)
def log_exit(logger, signature, parasList):
'''
Logs for exit from public methods at DEBUG level.
@param logger: the logger object
@param signature: the method signature
@param parasList: the objects to return
'''
logger.debug('[Exiting method ' + signature + ']')
if parasList != None and len(parasList) > 0:
logger.debug('[Output parameter ' + str(parasList) + ']')
def log_exception(logger, signature, e):
'''
Logging exception at ERROR level.
@param logger: the logger object
@param signature: the method signature
@param e: the error
'''
# This will log the traceback.
logger.error('[Error in method ' + signature + ': Details ' + str(e) + ']')
return e
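# --- Illustrative usage (added for clarity; not part of the original module). ---
# A minimal, self-contained sketch of how the three helpers above are meant to be
# wired into a method; the logger name and the signature string are assumptions
# made up for this example.
if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.DEBUG)
    _logger = logging.getLogger('decision_module.demo')
    _signature = 'demo.add(a, b)'
    log_entrance(_logger, _signature, {'a': 1, 'b': 2})
    try:
        _result = 1 + 2
        log_exit(_logger, _signature, [_result])
    except Exception as e:
        raise log_exception(_logger, _signature, e)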
| apache-2.0 | 4,204,570,390,211,401,700 | 28.057692 | 79 | 0.628723 | false |
hoangt/tpzsimul.gem5 | src/arch/x86/isa/insts/simd128/integer/logical/pand.py | 91 | 3202 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop PAND_XMM_XMM {
mand xmml, xmml, xmmlm
mand xmmh, xmmh, xmmhm
};
def macroop PAND_XMM_M {
lea t1, seg, sib, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], dataSize=8
ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8
mand xmml, xmml, ufp1
mand xmmh, xmmh, ufp2
};
def macroop PAND_XMM_P {
rdip t7
lea t1, seg, riprel, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], dataSize=8
ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8
mand xmml, xmml, ufp1
mand xmmh, xmmh, ufp2
};
def macroop PANDN_XMM_XMM {
mandn xmml, xmml, xmmlm
mandn xmmh, xmmh, xmmhm
};
def macroop PANDN_XMM_M {
lea t1, seg, sib, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], dataSize=8
ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8
mandn xmml, xmml, ufp1
mandn xmmh, xmmh, ufp2
};
def macroop PANDN_XMM_P {
rdip t7
lea t1, seg, riprel, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], dataSize=8
ldfp ufp2, seg, [1, t0, t1], 8, dataSize=8
mandn xmml, xmml, ufp1
mandn xmmh, xmmh, ufp2
};
'''
| bsd-3-clause | -389,914,660,292,749,500 | 38.04878 | 72 | 0.732355 | false |
arowla/csvkit | tests/test_unicsv.py | 21 | 6571 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
import os
import six
try:
import unittest2 as unittest
except ImportError:
import unittest
from csvkit import unicsv
@unittest.skipIf(six.PY3, "Not supported in Python 3.")
class TestUnicodeCSVReader(unittest.TestCase):
def test_utf8(self):
with open('examples/test_utf8.csv') as f:
reader = unicsv.UnicodeCSVReader(f, encoding='utf-8')
self.assertEqual(next(reader), ['a', 'b', 'c'])
self.assertEqual(next(reader), ['1', '2', '3'])
self.assertEqual(next(reader), ['4', '5', u'ʤ'])
def test_latin1(self):
with open('examples/test_latin1.csv') as f:
reader = unicsv.UnicodeCSVReader(f, encoding='latin1')
self.assertEqual(next(reader), ['a', 'b', 'c'])
self.assertEqual(next(reader), ['1', '2', '3'])
self.assertEqual(next(reader), ['4', '5', u'©'])
def test_utf16_big(self):
with open('examples/test_utf16_big.csv') as f:
reader = unicsv.UnicodeCSVReader(f, encoding='utf-16')
self.assertEqual(next(reader), ['a', 'b', 'c'])
self.assertEqual(next(reader), ['1', '2', '3'])
self.assertEqual(next(reader), ['4', '5', u'ʤ'])
def test_utf16_little(self):
with open('examples/test_utf16_little.csv') as f:
reader = unicsv.UnicodeCSVReader(f, encoding='utf-16')
self.assertEqual(next(reader), ['a', 'b', 'c'])
self.assertEqual(next(reader), ['1', '2', '3'])
self.assertEqual(next(reader), ['4', '5', u'ʤ'])
@unittest.skipIf(six.PY3, "Not supported in Python 3.")
class TestUnicodeCSVWriter(unittest.TestCase):
def test_utf8(self):
output = six.StringIO()
writer = unicsv.UnicodeCSVWriter(output, encoding='utf-8')
self.assertEqual(writer._eight_bit, True)
writer.writerow(['a', 'b', 'c'])
writer.writerow(['1', '2', '3'])
writer.writerow(['4', '5', u'ʤ'])
written = six.StringIO(output.getvalue())
reader = unicsv.UnicodeCSVReader(written, encoding='utf-8')
self.assertEqual(next(reader), ['a', 'b', 'c'])
self.assertEqual(next(reader), ['1', '2', '3'])
self.assertEqual(next(reader), ['4', '5', u'ʤ'])
def test_latin1(self):
output = six.StringIO()
writer = unicsv.UnicodeCSVWriter(output, encoding='latin1')
self.assertEqual(writer._eight_bit, True)
writer.writerow(['a', 'b', 'c'])
writer.writerow(['1', '2', '3'])
writer.writerow(['4', '5', u'©'])
written = six.StringIO(output.getvalue())
reader = unicsv.UnicodeCSVReader(written, encoding='latin1')
self.assertEqual(next(reader), ['a', 'b', 'c'])
self.assertEqual(next(reader), ['1', '2', '3'])
self.assertEqual(next(reader), ['4', '5', u'©'])
def test_utf16_big(self):
output = six.StringIO()
writer = unicsv.UnicodeCSVWriter(output, encoding='utf-16-be')
self.assertEqual(writer._eight_bit, False)
writer.writerow(['a', 'b', 'c'])
writer.writerow(['1', '2', '3'])
writer.writerow(['4', '5', u'ʤ'])
written = six.StringIO(output.getvalue())
reader = unicsv.UnicodeCSVReader(written, encoding='utf-16-be')
self.assertEqual(next(reader), ['a', 'b', 'c'])
self.assertEqual(next(reader), ['1', '2', '3'])
self.assertEqual(next(reader), ['4', '5', u'\u02A4'])
def test_utf16_little(self):
output = six.StringIO()
writer = unicsv.UnicodeCSVWriter(output, encoding='utf-16-le')
self.assertEqual(writer._eight_bit, False)
writer.writerow(['a', 'b', 'c'])
writer.writerow(['1', '2', '3'])
writer.writerow(['4', '5', u'ʤ'])
written = six.StringIO(output.getvalue())
reader = unicsv.UnicodeCSVReader(written, encoding='utf-16-le')
self.assertEqual(next(reader), ['a', 'b', 'c'])
self.assertEqual(next(reader), ['1', '2', '3'])
self.assertEqual(next(reader), ['4', '5', u'\u02A4'])
@unittest.skipIf(six.PY3, "Not supported in Python 3.")
class TestUnicodeCSVDictReader(unittest.TestCase):
def setUp(self):
self.f = open('examples/dummy.csv')
def tearDown(self):
self.f.close()
def test_reader(self):
reader = unicsv.UnicodeCSVDictReader(self.f)
self.assertEqual(next(reader), {
u'a': u'1',
u'b': u'2',
u'c': u'3'
})
def test_latin1(self):
with open('examples/test_latin1.csv') as f:
reader = unicsv.UnicodeCSVDictReader(f, encoding='latin1')
self.assertEqual(next(reader), {
u'a': u'1',
u'b': u'2',
u'c': u'3'
})
self.assertEqual(next(reader), {
u'a': u'4',
u'b': u'5',
u'c': u'©'
})
@unittest.skipIf(six.PY3, "Not supported in Python 3.")
class TestUnicodeCSVDictWriter(unittest.TestCase):
def setUp(self):
self.output = six.StringIO()
def tearDown(self):
self.output.close()
def test_writer(self):
writer = unicsv.UnicodeCSVDictWriter(self.output, ['a', 'b', 'c'], lineterminator='\n')
writer.writeheader()
writer.writerow({
u'a': u'1',
u'b': u'2',
u'c': u'☃'
})
result = self.output.getvalue()
self.assertEqual(result, 'a,b,c\n1,2,☃\n')
@unittest.skipIf(six.PY3, "Not supported in Python 3.")
class TestMaxFieldSize(unittest.TestCase):
def setUp(self):
self.lim = csv.field_size_limit()
with open('dummy.csv', 'w') as f:
f.write('a' * 10)
def tearDown(self):
# Resetting limit to avoid failure in other tests.
csv.field_size_limit(self.lim)
os.remove('dummy.csv')
def test_maxfieldsize(self):
# Testing --maxfieldsize for failure. Creating data using str * int.
with open('dummy.csv', 'r') as f:
c = unicsv.UnicodeCSVReader(f, maxfieldsize=9)
try:
c.next()
except unicsv.FieldSizeLimitError:
pass
else:
raise AssertionError('Expected unicsv.FieldSizeLimitError')
# Now testing higher --maxfieldsize.
with open('dummy.csv', 'r') as f:
c = unicsv.UnicodeCSVReader(f, maxfieldsize=11)
self.assertEqual(['a' * 10], c.next())
| mit | 5,686,014,005,932,710,000 | 33.87234 | 95 | 0.558267 | false |
shuangshuangwang/spark | python/pyspark/sql/window.py | 23 | 12863 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark import since, SparkContext
from pyspark.sql.column import _to_seq, _to_java_column
__all__ = ["Window", "WindowSpec"]
def _to_java_cols(cols):
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], list):
cols = cols[0]
return _to_seq(sc, cols, _to_java_column)
class Window(object):
"""
Utility functions for defining window in DataFrames.
.. versionadded:: 1.4
Notes
-----
When ordering is not defined, an unbounded window frame (rowFrame,
unboundedPreceding, unboundedFollowing) is used by default. When ordering is defined,
a growing window frame (rangeFrame, unboundedPreceding, currentRow) is used by default.
Examples
--------
>>> # ORDER BY date ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
>>> window = Window.orderBy("date").rowsBetween(Window.unboundedPreceding, Window.currentRow)
>>> # PARTITION BY country ORDER BY date RANGE BETWEEN 3 PRECEDING AND 3 FOLLOWING
>>> window = Window.orderBy("date").partitionBy("country").rangeBetween(-3, 3)
"""
_JAVA_MIN_LONG = -(1 << 63) # -9223372036854775808
_JAVA_MAX_LONG = (1 << 63) - 1 # 9223372036854775807
_PRECEDING_THRESHOLD = max(-sys.maxsize, _JAVA_MIN_LONG)
_FOLLOWING_THRESHOLD = min(sys.maxsize, _JAVA_MAX_LONG)
unboundedPreceding = _JAVA_MIN_LONG
unboundedFollowing = _JAVA_MAX_LONG
currentRow = 0
@staticmethod
@since(1.4)
def partitionBy(*cols):
"""
Creates a :class:`WindowSpec` with the partitioning defined.
"""
sc = SparkContext._active_spark_context
jspec = sc._jvm.org.apache.spark.sql.expressions.Window.partitionBy(_to_java_cols(cols))
return WindowSpec(jspec)
@staticmethod
@since(1.4)
def orderBy(*cols):
"""
Creates a :class:`WindowSpec` with the ordering defined.
"""
sc = SparkContext._active_spark_context
jspec = sc._jvm.org.apache.spark.sql.expressions.Window.orderBy(_to_java_cols(cols))
return WindowSpec(jspec)
@staticmethod
def rowsBetween(start, end):
"""
Creates a :class:`WindowSpec` with the frame boundaries defined,
from `start` (inclusive) to `end` (inclusive).
Both `start` and `end` are relative positions from the current row.
For example, "0" means "current row", while "-1" means the row before
the current row, and "5" means the fifth row after the current row.
We recommend users use ``Window.unboundedPreceding``, ``Window.unboundedFollowing``,
and ``Window.currentRow`` to specify special boundary values, rather than using integral
values directly.
A row based boundary is based on the position of the row within the partition.
        An offset indicates the number of rows above or below the current row at which
        the frame for the current row starts or ends. For instance, given a row-based
        sliding frame with a lower bound offset of -1 and an upper bound offset of +2,
        the frame for the row with index 5 would range from index 4 to index 7.
.. versionadded:: 2.1.0
Parameters
----------
start : int
boundary start, inclusive.
The frame is unbounded if this is ``Window.unboundedPreceding``, or
any value less than or equal to -9223372036854775808.
end : int
boundary end, inclusive.
The frame is unbounded if this is ``Window.unboundedFollowing``, or
any value greater than or equal to 9223372036854775807.
Examples
--------
>>> from pyspark.sql import Window
>>> from pyspark.sql import functions as func
>>> from pyspark.sql import SQLContext
>>> sc = SparkContext.getOrCreate()
>>> sqlContext = SQLContext(sc)
>>> tup = [(1, "a"), (1, "a"), (2, "a"), (1, "b"), (2, "b"), (3, "b")]
>>> df = sqlContext.createDataFrame(tup, ["id", "category"])
>>> window = Window.partitionBy("category").orderBy("id").rowsBetween(Window.currentRow, 1)
>>> df.withColumn("sum", func.sum("id").over(window)).sort("id", "category", "sum").show()
+---+--------+---+
| id|category|sum|
+---+--------+---+
| 1| a| 2|
| 1| a| 3|
| 1| b| 3|
| 2| a| 2|
| 2| b| 5|
| 3| b| 3|
+---+--------+---+
"""
if start <= Window._PRECEDING_THRESHOLD:
start = Window.unboundedPreceding
if end >= Window._FOLLOWING_THRESHOLD:
end = Window.unboundedFollowing
sc = SparkContext._active_spark_context
jspec = sc._jvm.org.apache.spark.sql.expressions.Window.rowsBetween(start, end)
return WindowSpec(jspec)
@staticmethod
def rangeBetween(start, end):
"""
Creates a :class:`WindowSpec` with the frame boundaries defined,
from `start` (inclusive) to `end` (inclusive).
        Both `start` and `end` are relative to the current row. For example,
        "0" means "current row", while "-1" means one row before the current row,
        and "5" means five rows after the current row.
We recommend users use ``Window.unboundedPreceding``, ``Window.unboundedFollowing``,
and ``Window.currentRow`` to specify special boundary values, rather than using integral
values directly.
A range-based boundary is based on the actual value of the ORDER BY
        expression(s). An offset is used to alter the value of the ORDER BY expression: for
        instance, if the current ORDER BY expression has a value of 10 and the lower bound offset
        is -3, the resulting lower bound for the current row will be 10 - 3 = 7. This, however, puts a
        number of constraints on the ORDER BY expressions: there can be only one expression and this
        expression must have a numerical data type. An exception can be made when the offset is
        unbounded, because no value modification is needed; in this case multiple and non-numeric
        ORDER BY expressions are allowed.
.. versionadded:: 2.1.0
Parameters
----------
start : int
boundary start, inclusive.
The frame is unbounded if this is ``Window.unboundedPreceding``, or
any value less than or equal to max(-sys.maxsize, -9223372036854775808).
end : int
boundary end, inclusive.
The frame is unbounded if this is ``Window.unboundedFollowing``, or
any value greater than or equal to min(sys.maxsize, 9223372036854775807).
Examples
--------
>>> from pyspark.sql import Window
>>> from pyspark.sql import functions as func
>>> from pyspark.sql import SQLContext
>>> sc = SparkContext.getOrCreate()
>>> sqlContext = SQLContext(sc)
>>> tup = [(1, "a"), (1, "a"), (2, "a"), (1, "b"), (2, "b"), (3, "b")]
>>> df = sqlContext.createDataFrame(tup, ["id", "category"])
>>> window = Window.partitionBy("category").orderBy("id").rangeBetween(Window.currentRow, 1)
>>> df.withColumn("sum", func.sum("id").over(window)).sort("id", "category").show()
+---+--------+---+
| id|category|sum|
+---+--------+---+
| 1| a| 4|
| 1| a| 4|
| 1| b| 3|
| 2| a| 2|
| 2| b| 5|
| 3| b| 3|
+---+--------+---+
"""
if start <= Window._PRECEDING_THRESHOLD:
start = Window.unboundedPreceding
if end >= Window._FOLLOWING_THRESHOLD:
end = Window.unboundedFollowing
sc = SparkContext._active_spark_context
jspec = sc._jvm.org.apache.spark.sql.expressions.Window.rangeBetween(start, end)
return WindowSpec(jspec)
class WindowSpec(object):
"""
A window specification that defines the partitioning, ordering,
and frame boundaries.
Use the static methods in :class:`Window` to create a :class:`WindowSpec`.
.. versionadded:: 1.4.0
"""
def __init__(self, jspec):
self._jspec = jspec
def partitionBy(self, *cols):
"""
Defines the partitioning columns in a :class:`WindowSpec`.
.. versionadded:: 1.4.0
Parameters
----------
cols : str, :class:`Column` or list
names of columns or expressions
"""
return WindowSpec(self._jspec.partitionBy(_to_java_cols(cols)))
def orderBy(self, *cols):
"""
Defines the ordering columns in a :class:`WindowSpec`.
.. versionadded:: 1.4.0
Parameters
----------
cols : str, :class:`Column` or list
names of columns or expressions
"""
return WindowSpec(self._jspec.orderBy(_to_java_cols(cols)))
def rowsBetween(self, start, end):
"""
Defines the frame boundaries, from `start` (inclusive) to `end` (inclusive).
Both `start` and `end` are relative positions from the current row.
For example, "0" means "current row", while "-1" means the row before
the current row, and "5" means the fifth row after the current row.
We recommend users use ``Window.unboundedPreceding``, ``Window.unboundedFollowing``,
and ``Window.currentRow`` to specify special boundary values, rather than using integral
values directly.
.. versionadded:: 1.4.0
Parameters
----------
start : int
boundary start, inclusive.
The frame is unbounded if this is ``Window.unboundedPreceding``, or
any value less than or equal to max(-sys.maxsize, -9223372036854775808).
end : int
boundary end, inclusive.
The frame is unbounded if this is ``Window.unboundedFollowing``, or
any value greater than or equal to min(sys.maxsize, 9223372036854775807).
"""
if start <= Window._PRECEDING_THRESHOLD:
start = Window.unboundedPreceding
if end >= Window._FOLLOWING_THRESHOLD:
end = Window.unboundedFollowing
return WindowSpec(self._jspec.rowsBetween(start, end))
def rangeBetween(self, start, end):
"""
Defines the frame boundaries, from `start` (inclusive) to `end` (inclusive).
        Both `start` and `end` are relative to the current row. For example,
        "0" means "current row", while "-1" means one row before the current row,
        and "5" means five rows after the current row.
We recommend users use ``Window.unboundedPreceding``, ``Window.unboundedFollowing``,
and ``Window.currentRow`` to specify special boundary values, rather than using integral
values directly.
.. versionadded:: 1.4.0
Parameters
----------
start : int
boundary start, inclusive.
The frame is unbounded if this is ``Window.unboundedPreceding``, or
any value less than or equal to max(-sys.maxsize, -9223372036854775808).
end : int
boundary end, inclusive.
The frame is unbounded if this is ``Window.unboundedFollowing``, or
any value greater than or equal to min(sys.maxsize, 9223372036854775807).
"""
if start <= Window._PRECEDING_THRESHOLD:
start = Window.unboundedPreceding
if end >= Window._FOLLOWING_THRESHOLD:
end = Window.unboundedFollowing
return WindowSpec(self._jspec.rangeBetween(start, end))
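# --- Illustrative sketch (added for clarity; not part of the original module). ---
# How the two frame types above differ on tied ORDER BY values, reusing the
# hypothetical DataFrame (columns "category" and "id") from the doctests of
# Window.rowsBetween/rangeBetween:
#
#   rows_win = Window.partitionBy("category").orderBy("id").rowsBetween(-1, Window.currentRow)
#   range_win = Window.partitionBy("category").orderBy("id").rangeBetween(-1, Window.currentRow)
#
# func.sum("id").over(rows_win) sums at most two physical rows (the previous row
# and the current one), while func.sum("id").over(range_win) sums every row whose
# "id" lies in [current_id - 1, current_id], so tied ids are all included.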
def _test():
import doctest
import pyspark.sql.window
SparkContext('local[4]', 'PythonTest')
globs = pyspark.sql.window.__dict__.copy()
(failure_count, test_count) = doctest.testmod(
pyspark.sql.window, globs=globs,
optionflags=doctest.NORMALIZE_WHITESPACE)
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 | -6,153,041,852,712,496,000 | 37.743976 | 100 | 0.612532 | false |
yencarnacion/jaikuengine | .google_appengine/lib/django-1.5/tests/regressiontests/admin_util/models.py | 115 | 1303 | from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class Article(models.Model):
"""
A simple Article model for testing
"""
site = models.ForeignKey('sites.Site', related_name="admin_articles")
title = models.CharField(max_length=100)
title2 = models.CharField(max_length=100, verbose_name="another name")
created = models.DateTimeField()
def test_from_model(self):
return "nothing"
def test_from_model_with_override(self):
return "nothing"
test_from_model_with_override.short_description = "not What you Expect"
@python_2_unicode_compatible
class Count(models.Model):
num = models.PositiveSmallIntegerField()
parent = models.ForeignKey('self', null=True)
def __str__(self):
return six.text_type(self.num)
class Event(models.Model):
date = models.DateTimeField(auto_now_add=True)
class Location(models.Model):
event = models.OneToOneField(Event, verbose_name='awesome event')
class Guest(models.Model):
event = models.OneToOneField(Event)
name = models.CharField(max_length=255)
class Meta:
verbose_name = "awesome guest"
class EventGuide(models.Model):
event = models.ForeignKey(Event, on_delete=models.DO_NOTHING)
| apache-2.0 | 1,174,637,831,499,287,800 | 28.613636 | 75 | 0.708365 | false |
Clyde-fare/scikit-learn | sklearn/metrics/ranking.py | 79 | 25426 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
fpr, tpr, tresholds = roc_curve(y_true, y_score,
sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (np.all(classes == [0, 1]) or
np.all(classes == [-1, 1]) or
np.all(classes == [0]) or
np.all(classes == [-1]) or
np.all(classes == [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
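# --- Illustrative sketch (added for clarity; not part of the original module). ---
# A small worked example of the private helper above, on the same toy data as the
# roc_curve doctest further down:
#
#   fps, tps, thr = _binary_clf_curve(np.array([0, 0, 1, 1]),
#                                     np.array([0.1, 0.4, 0.35, 0.8]))
#   # fps -> [0, 1, 1, 2], tps -> [1, 1, 2, 2], thr -> [0.8, 0.4, 0.35, 0.1]
#
# At threshold 0.35 both positives and one of the two negatives score >= 0.35,
# which is exactly the (fpr, tpr) = (0.5, 1.0) point of the roc_curve doctest.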
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
    have a corresponding threshold. This ensures that the graph starts on the
    y axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
# Handle badly formated array and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
    Ties in ``y_scores`` are broken by giving the maximal rank that would have
    been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
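# --- Illustrative sketch (added for clarity; not part of the original module). ---
# A small worked example for coverage_error; the arrays are made up for the
# illustration. For the first sample the lowest-scored true label is reached at
# rank 2, for the second at rank 3, so the average coverage is 2.5:
#
#   coverage_error(np.array([[1, 0, 0], [0, 0, 1]]),
#                  np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]]))   # -> 2.5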
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
# if the scores are ordered, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
    # When there are no positive or no negative labels, those values should
    # be considered as correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
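# --- Illustrative sketch (added for clarity; not part of the original module). ---
# A small worked example for label_ranking_loss on the same made-up data as the
# coverage_error sketch above: the first sample has one of its two (true, false)
# label pairs inverted (loss 0.5) and the second has both pairs inverted
# (loss 1.0), giving an average ranking loss of 0.75:
#
#   label_ranking_loss(np.array([[1, 0, 0], [0, 0, 1]]),
#                      np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]]))   # -> 0.75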
| bsd-3-clause | 5,139,893,970,337,328,000 | 34.26491 | 79 | 0.623889 | false |
GunnerJnr/_CodeInstitute | Stream-3/Full-Stack-Development/19.Djangos-Testing-Framework/4.How-To-Test-Models/we_are_social/accounts/forms.py | 8 | 1889 | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.core.exceptions import ValidationError
from accounts.models import User
class UserRegistrationForm(UserCreationForm):
MONTH_ABBREVIATIONS = [
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'June',
'July', 'Aug', 'Sept', 'Oct', 'Nov', 'Dec'
]
MONTH_CHOICES = list(enumerate(MONTH_ABBREVIATIONS, 1))
YEAR_CHOICES = [(i, i) for i in range(2017, 2038)]
credit_card_number = forms.CharField(label='Credit Card Number')
cvv = forms.CharField(label='Security Code (CVV)')
expiry_month = forms.ChoiceField(label="Month", choices=MONTH_CHOICES)
expiry_year = forms.ChoiceField(label="Year", choices=YEAR_CHOICES)
stripe_id = forms.CharField(widget=forms.HiddenInput)
password1 = forms.CharField(
label='Password',
widget=forms.PasswordInput
)
password2 = forms.CharField(
label='Password Confirmation',
widget=forms.PasswordInput
)
class Meta:
model = User
fields = ['email', 'password1', 'password2', 'stripe_id']
exclude = ['username']
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2 and password1 != password2:
message = "Passwords do not match"
raise ValidationError(message)
return password2
def save(self, commit=True):
instance = super(UserRegistrationForm, self).save(commit=False)
# automatically set to email address to create a unique identifier
instance.username = instance.email
if commit:
instance.save()
return instance
class UserLoginForm(forms.Form):
email = forms.EmailField()
password = forms.CharField(widget=forms.PasswordInput)
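# --- Illustrative sketch (added for clarity; not part of the original module). ---
# A minimal view-level usage of the registration form above; the view function,
# URL name and template path are assumptions made up for this example.
#
#   def register(request):
#       form = UserRegistrationForm(request.POST or None)
#       if request.method == 'POST' and form.is_valid():
#           form.save()                # save() sets username to the email address
#           return redirect('login')
#       return render(request, 'register.html', {'form': form})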
| mit | 404,499,046,444,078,000 | 29.967213 | 74 | 0.660667 | false |
atlassian/boto | tests/integration/ec2/elb/test_connection.py | 100 | 12554 | # Copyright (c) 2010 Hunter Blanks http://artifex.org/~hblanks/
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Initial, and very limited, unit tests for ELBConnection.
"""
import boto
import time
from tests.compat import unittest
from boto.ec2.elb import ELBConnection
import boto.ec2.elb
class ELBConnectionTest(unittest.TestCase):
ec2 = True
def setUp(self):
"""Creates a named load balancer that can be safely
deleted at the end of each test"""
self.conn = ELBConnection()
self.name = 'elb-boto-unit-test'
self.availability_zones = ['us-east-1a']
self.listeners = [(80, 8000, 'HTTP')]
self.balancer = self.conn.create_load_balancer(
self.name, self.availability_zones, self.listeners)
# S3 bucket for log tests
self.s3 = boto.connect_s3()
self.timestamp = str(int(time.time()))
self.bucket_name = 'boto-elb-%s' % self.timestamp
self.bucket = self.s3.create_bucket(self.bucket_name)
self.bucket.set_canned_acl('public-read-write')
self.addCleanup(self.cleanup_bucket, self.bucket)
def cleanup_bucket(self, bucket):
for key in bucket.get_all_keys():
key.delete()
bucket.delete()
def tearDown(self):
""" Deletes the test load balancer after every test.
It does not delete EVERY load balancer in your account"""
self.balancer.delete()
def test_build_list_params(self):
params = {}
self.conn.build_list_params(
params, ['thing1', 'thing2', 'thing3'], 'ThingName%d')
expected_params = {
'ThingName1': 'thing1',
'ThingName2': 'thing2',
'ThingName3': 'thing3'
}
self.assertEqual(params, expected_params)
# TODO: for these next tests, consider sleeping until our load
# balancer comes up, then testing for connectivity to
# balancer.dns_name, along the lines of the existing EC2 unit tests.
def test_create_load_balancer(self):
self.assertEqual(self.balancer.name, self.name)
self.assertEqual(self.balancer.availability_zones,
self.availability_zones)
self.assertEqual(self.balancer.listeners, self.listeners)
balancers = self.conn.get_all_load_balancers()
self.assertEqual([lb.name for lb in balancers], [self.name])
def test_create_load_balancer_listeners(self):
more_listeners = [(443, 8001, 'HTTP')]
self.conn.create_load_balancer_listeners(self.name, more_listeners)
balancers = self.conn.get_all_load_balancers()
self.assertEqual([lb.name for lb in balancers], [self.name])
self.assertEqual(
sorted(l.get_tuple() for l in balancers[0].listeners),
sorted(self.listeners + more_listeners)
)
def test_delete_load_balancer_listeners(self):
mod_listeners = [(80, 8000, 'HTTP'), (443, 8001, 'HTTP')]
mod_name = self.name + "-mod"
self.mod_balancer = self.conn.create_load_balancer(
mod_name, self.availability_zones, mod_listeners)
mod_balancers = self.conn.get_all_load_balancers(
load_balancer_names=[mod_name])
self.assertEqual([lb.name for lb in mod_balancers], [mod_name])
self.assertEqual(
sorted([l.get_tuple() for l in mod_balancers[0].listeners]),
sorted(mod_listeners))
self.conn.delete_load_balancer_listeners(self.mod_balancer.name, [443])
mod_balancers = self.conn.get_all_load_balancers(
load_balancer_names=[mod_name])
self.assertEqual([lb.name for lb in mod_balancers], [mod_name])
self.assertEqual([l.get_tuple() for l in mod_balancers[0].listeners],
mod_listeners[:1])
self.mod_balancer.delete()
def test_create_load_balancer_listeners_with_policies(self):
more_listeners = [(443, 8001, 'HTTP')]
self.conn.create_load_balancer_listeners(self.name, more_listeners)
lb_policy_name = 'lb-policy'
self.conn.create_lb_cookie_stickiness_policy(
1000, self.name, lb_policy_name)
self.conn.set_lb_policies_of_listener(
self.name, self.listeners[0][0], lb_policy_name)
app_policy_name = 'app-policy'
self.conn.create_app_cookie_stickiness_policy(
'appcookie', self.name, app_policy_name)
self.conn.set_lb_policies_of_listener(
self.name, more_listeners[0][0], app_policy_name)
balancers = self.conn.get_all_load_balancers(
load_balancer_names=[self.name])
self.assertEqual([lb.name for lb in balancers], [self.name])
self.assertEqual(
sorted(l.get_tuple() for l in balancers[0].listeners),
sorted(self.listeners + more_listeners)
)
# Policy names should be checked here once they are supported
# in the Listener object.
def test_create_load_balancer_backend_with_policies(self):
other_policy_name = 'enable-proxy-protocol'
backend_port = 8081
self.conn.create_lb_policy(
self.name, other_policy_name,
'ProxyProtocolPolicyType', {'ProxyProtocol': True})
self.conn.set_lb_policies_of_backend_server(
self.name, backend_port, [other_policy_name])
balancers = self.conn.get_all_load_balancers(
load_balancer_names=[self.name])
self.assertEqual([lb.name for lb in balancers], [self.name])
self.assertEqual(len(balancers[0].policies.other_policies), 1)
self.assertEqual(balancers[0].policies.other_policies[0].policy_name,
other_policy_name)
self.assertEqual(len(balancers[0].backends), 1)
self.assertEqual(balancers[0].backends[0].instance_port, backend_port)
self.assertEqual(balancers[0].backends[0].policies[0].policy_name,
other_policy_name)
self.conn.set_lb_policies_of_backend_server(self.name, backend_port,
[])
balancers = self.conn.get_all_load_balancers(
load_balancer_names=[self.name])
self.assertEqual([lb.name for lb in balancers], [self.name])
self.assertEqual(len(balancers[0].policies.other_policies), 1)
self.assertEqual(len(balancers[0].backends), 0)
def test_create_load_balancer_complex_listeners(self):
complex_listeners = [
(8080, 80, 'HTTP', 'HTTP'),
(2525, 25, 'TCP', 'TCP'),
]
self.conn.create_load_balancer_listeners(
self.name,
complex_listeners=complex_listeners
)
balancers = self.conn.get_all_load_balancers(
load_balancer_names=[self.name]
)
self.assertEqual([lb.name for lb in balancers], [self.name])
self.assertEqual(
sorted(l.get_complex_tuple() for l in balancers[0].listeners),
# We need an extra 'HTTP' here over what ``self.listeners`` uses.
sorted([(80, 8000, 'HTTP', 'HTTP')] + complex_listeners)
)
def test_load_balancer_access_log(self):
attributes = self.balancer.get_attributes()
self.assertEqual(False, attributes.access_log.enabled)
attributes.access_log.enabled = True
attributes.access_log.s3_bucket_name = self.bucket_name
attributes.access_log.s3_bucket_prefix = 'access-logs'
attributes.access_log.emit_interval = 5
self.conn.modify_lb_attribute(self.balancer.name, 'accessLog',
attributes.access_log)
new_attributes = self.balancer.get_attributes()
self.assertEqual(True, new_attributes.access_log.enabled)
self.assertEqual(self.bucket_name,
new_attributes.access_log.s3_bucket_name)
self.assertEqual('access-logs',
new_attributes.access_log.s3_bucket_prefix)
self.assertEqual(5, new_attributes.access_log.emit_interval)
def test_load_balancer_get_attributes(self):
attributes = self.balancer.get_attributes()
connection_draining = self.conn.get_lb_attribute(self.balancer.name,
'ConnectionDraining')
self.assertEqual(connection_draining.enabled,
attributes.connection_draining.enabled)
self.assertEqual(connection_draining.timeout,
attributes.connection_draining.timeout)
access_log = self.conn.get_lb_attribute(self.balancer.name,
'AccessLog')
self.assertEqual(access_log.enabled, attributes.access_log.enabled)
self.assertEqual(access_log.s3_bucket_name,
attributes.access_log.s3_bucket_name)
self.assertEqual(access_log.s3_bucket_prefix,
attributes.access_log.s3_bucket_prefix)
self.assertEqual(access_log.emit_interval,
attributes.access_log.emit_interval)
cross_zone_load_balancing = self.conn.get_lb_attribute(
self.balancer.name, 'CrossZoneLoadBalancing')
self.assertEqual(cross_zone_load_balancing,
attributes.cross_zone_load_balancing.enabled)
def change_and_verify_load_balancer_connection_draining(
self, enabled, timeout=None):
attributes = self.balancer.get_attributes()
attributes.connection_draining.enabled = enabled
if timeout is not None:
attributes.connection_draining.timeout = timeout
self.conn.modify_lb_attribute(
self.balancer.name, 'ConnectionDraining',
attributes.connection_draining)
attributes = self.balancer.get_attributes()
self.assertEqual(enabled, attributes.connection_draining.enabled)
if timeout is not None:
self.assertEqual(timeout, attributes.connection_draining.timeout)
def test_load_balancer_connection_draining_config(self):
self.change_and_verify_load_balancer_connection_draining(True, 128)
self.change_and_verify_load_balancer_connection_draining(True, 256)
self.change_and_verify_load_balancer_connection_draining(False)
self.change_and_verify_load_balancer_connection_draining(True, 64)
def test_set_load_balancer_policies_of_listeners(self):
more_listeners = [(443, 8001, 'HTTP')]
self.conn.create_load_balancer_listeners(self.name, more_listeners)
lb_policy_name = 'lb-policy'
self.conn.create_lb_cookie_stickiness_policy(
1000,
self.name,
lb_policy_name
)
self.conn.set_lb_policies_of_listener(
self.name,
self.listeners[0][0],
lb_policy_name
)
# Try to remove the policy by passing empty list.
# http://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_SetLoadBalancerPoliciesOfListener.html
# documents this as the way to remove policies.
self.conn.set_lb_policies_of_listener(
self.name,
self.listeners[0][0],
[]
)
def test_can_make_sigv4_call(self):
connection = boto.ec2.elb.connect_to_region('eu-central-1')
lbs = connection.get_all_load_balancers()
self.assertTrue(isinstance(lbs, list))
if __name__ == '__main__':
unittest.main()
| mit | 4,166,769,582,607,696,400 | 41.26936 | 120 | 0.634061 | false |
suiyuan2009/tensorflow | tensorflow/python/kernel_tests/sparse_tensors_map_ops_test.py | 134 | 9952 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseTensorsMap."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# pylint: disable=protected-access
add_sparse_to_tensors_map = sparse_ops._add_sparse_to_tensors_map
add_many_sparse_to_tensors_map = sparse_ops._add_many_sparse_to_tensors_map
take_many_sparse_from_tensors_map = (
sparse_ops._take_many_sparse_from_tensors_map)
# pylint: enable=protected-access
class SparseTensorsMapTest(test.TestCase):
def _SparseTensorPlaceholder(self, dtype=None):
if dtype is None:
dtype = dtypes.int32
return sparse_tensor_lib.SparseTensor(
array_ops.placeholder(dtypes.int64),
array_ops.placeholder(dtype), array_ops.placeholder(dtypes.int64))
def _SparseTensorValue_5x6(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
[3, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([5, 6]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_3x4(self, permutation):
ind = np.array([[0, 0], [1, 0], [1, 2], [1, 3], [2, 2],
[2, 3]]).astype(np.int64)
val = np.array([0, 10, 13, 14, 32, 33]).astype(np.int32)
ind = ind[permutation]
val = val[permutation]
shape = np.array([3, 4]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def _SparseTensorValue_1x1x1(self):
ind = np.array([[0, 0, 0]]).astype(np.int64)
val = np.array([0]).astype(np.int32)
shape = np.array([3, 4, 5]).astype(np.int64)
return sparse_tensor_lib.SparseTensorValue(ind, val, shape)
def testAddTakeMany(self):
with self.test_session(graph=ops.Graph(), use_gpu=False) as sess:
sp_input0 = self._SparseTensorValue_5x6(np.arange(6))
sp_input1 = self._SparseTensorValue_3x4(np.arange(6))
handle0 = add_sparse_to_tensors_map(sp_input0, shared_name="a")
handle1 = add_sparse_to_tensors_map(sp_input1, shared_name="a")
self.assertEqual(handle0.get_shape(), ())
handles_concat = array_ops.stack([handle0, handle1])
sp_out = take_many_sparse_from_tensors_map(
sparse_map_op=handle0.op, sparse_handles=handles_concat)
combined_indices, combined_values, combined_shape = sess.run(sp_out)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], sp_input0[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], sp_input1[0])
self.assertAllEqual(combined_values[:6], sp_input0[1])
self.assertAllEqual(combined_values[6:], sp_input1[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
def testFeedAddTakeMany(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_3x4(np.arange(6))
handle = add_sparse_to_tensors_map(sp_input)
handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})
sparse_handles = ops.convert_to_tensor(
[handle0_value, handle1_value], dtype=dtypes.int64)
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=sparse_handles)
combined_indices, combined_values, combined_shape = sess.run(sp_roundtrip)
self.assertAllEqual(combined_indices[:6, 0], [0] * 6) # minibatch 0
self.assertAllEqual(combined_indices[:6, 1:], input0_val[0])
self.assertAllEqual(combined_indices[6:, 0], [1] * 6) # minibatch 1
self.assertAllEqual(combined_indices[6:, 1:], input1_val[0])
self.assertAllEqual(combined_values[:6], input0_val[1])
self.assertAllEqual(combined_values[6:], input1_val[1])
self.assertAllEqual(combined_shape, [2, 5, 6])
def testAddManyTakeManyRoundTrip(self):
with self.test_session(use_gpu=False) as sess:
# N == 4 because shape_value == [4, 5]
indices_value = np.array([[0, 0], [0, 1], [2, 0]], dtype=np.int64)
values_value = np.array([b"a", b"b", b"c"])
shape_value = np.array([4, 5], dtype=np.int64)
sparse_tensor = self._SparseTensorPlaceholder(dtype=dtypes.string)
handles = add_many_sparse_to_tensors_map(sparse_tensor)
roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handles.op, sparse_handles=handles)
handles_value, roundtrip_value = sess.run(
[handles, roundtrip],
feed_dict={
sparse_tensor.indices: indices_value,
sparse_tensor.values: values_value,
sparse_tensor.dense_shape: shape_value
})
self.assertEqual(handles_value.shape, (4,))
self.assertAllEqual(roundtrip_value.indices, indices_value)
self.assertAllEqual(roundtrip_value.values, values_value)
self.assertAllEqual(roundtrip_value.dense_shape, shape_value)
def testDeserializeFailsInconsistentRank(self):
with self.test_session(use_gpu=False) as sess:
sp_input = self._SparseTensorPlaceholder()
input0_val = self._SparseTensorValue_5x6(np.arange(6))
input1_val = self._SparseTensorValue_1x1x1()
handle = add_sparse_to_tensors_map(sp_input)
handle0_value = sess.run(handle, feed_dict={sp_input: input0_val})
handle1_value = sess.run(handle, feed_dict={sp_input: input1_val})
handle_concat = ops.convert_to_tensor(
[handle0_value, handle1_value], dtype=dtypes.int64)
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=handle_concat)
with self.assertRaisesOpError(
r"Inconsistent rank across SparseTensors: rank prior to "
r"SparseTensor\[1\] was: 3 but rank of SparseTensor\[1\] is: 4"):
sess.run(sp_roundtrip)
def testTakeManyFailsWrongInputOp(self):
with self.test_session(use_gpu=False) as sess:
input_val = self._SparseTensorValue_5x6(np.arange(6))
handle = add_sparse_to_tensors_map(input_val)
handle_value = sess.run(handle)
bad_handle = handle_value + 10
sp_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=handle.op, sparse_handles=[handle_value, bad_handle])
with self.assertRaisesOpError(r"Unable to find SparseTensor: 10"):
sess.run(sp_roundtrip)
class BenchmarkSparseTensorsMapVsSerialization(test.Benchmark):
def benchmarkVeryLarge2DFloatSparseTensor(self):
np.random.seed(127)
num_elements = 10000
batch_size = 64
indices_batch = np.random.randint(
batch_size, size=num_elements, dtype=np.int64)
indices_value = np.arange(num_elements, dtype=np.int64)
indices = np.asarray(
sorted(zip(indices_batch, indices_value)), dtype=np.int64)
values = ["feature_value_for_embedding_lookup"] * num_elements
shape = np.asarray([batch_size, num_elements], dtype=np.int64)
with session.Session() as sess:
with ops.device("/cpu:0"):
indices = variables.Variable(indices)
values = variables.Variable(values)
shape = variables.Variable(shape)
st = sparse_tensor_lib.SparseTensor(indices, values, shape)
st_handles = add_many_sparse_to_tensors_map(st)
st_roundtrip = take_many_sparse_from_tensors_map(
sparse_map_op=st_handles.op, sparse_handles=st_handles)
st_roundtrip_op = st_roundtrip.values.op
st_serialized = sparse_ops.serialize_many_sparse(st)
st_deserialized = sparse_ops.deserialize_many_sparse(
st_serialized, dtype=values.dtype)
st_deserialized_op = st_deserialized.values.op
variables.global_variables_initializer().run()
st_roundtrip_values = sess.run(st_roundtrip)
st_deserialized_values = sess.run(st_deserialized)
np.testing.assert_equal(st_roundtrip_values.values,
st_deserialized_values.values)
np.testing.assert_equal(st_roundtrip_values.indices,
st_deserialized_values.indices)
np.testing.assert_equal(st_roundtrip_values.dense_shape,
st_deserialized_values.dense_shape)
self.run_op_benchmark(
sess,
st_roundtrip_op,
min_iters=2000,
name="benchmark_very_large_2d_float_st_tensor_maps")
self.run_op_benchmark(
sess,
st_deserialized_op,
min_iters=2000,
name="benchmark_very_large_2d_float_st_serialization")
if __name__ == "__main__":
test.main()
| apache-2.0 | -4,384,690,586,506,636,000 | 41.169492 | 80 | 0.665092 | false |
wrouesnel/ansible | lib/ansible/plugins/netconf/__init__.py | 4 | 10724 | #
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from abc import ABCMeta, abstractmethod
from functools import wraps
from ansible.errors import AnsibleError
from ansible.module_utils.six import with_metaclass
from ansible.module_utils._text import to_bytes
try:
from ncclient.operations import RPCError
from ncclient.xml_ import to_xml, to_ele
except ImportError:
raise AnsibleError("ncclient is not installed")
def ensure_connected(func):
@wraps(func)
def wrapped(self, *args, **kwargs):
if not self._connection._connected:
self._connection._connect()
return func(self, *args, **kwargs)
return wrapped
class NetconfBase(with_metaclass(ABCMeta, object)):
"""
A base class for implementing Netconf connections
.. note:: Unlike most of Ansible, nearly all strings in
        :class:`NetconfBase` plugins are byte strings.  This is because of
how close to the underlying platform these plugins operate. Remember
to mark literal strings as byte string (``b"string"``) and to use
:func:`~ansible.module_utils._text.to_bytes` and
:func:`~ansible.module_utils._text.to_text` to avoid unexpected
problems.
List of supported rpc's:
:get: Retrieves running configuration and device state information
:get_config: Retrieves the specified configuration from the device
:edit_config: Loads the specified commands into the remote device
:commit: Load configuration from candidate to running
:discard_changes: Discard changes to candidate datastore
:validate: Validate the contents of the specified configuration.
:lock: Allows the client to lock the configuration system of a device.
:unlock: Release a configuration lock, previously obtained with the lock operation.
:copy_config: create or replace an entire configuration datastore with the contents of another complete
configuration datastore.
:get-schema: Retrieves the required schema from the device
:get_capabilities: Retrieves device information and supported rpc methods
For JUNOS:
:execute_rpc: RPC to be execute on remote device
:load_configuration: Loads given configuration on device
        Note: rpc support depends on the capabilities of the remote device.
:returns: Returns output received from remote device as byte string
        Note: the 'result' or 'error' from the response should be converted to an
        ElementTree object using 'fromstring' to parse the output as an xml doc
'get_capabilities()' returns 'result' as a json string.
Usage:
from ansible.module_utils.connection import Connection
conn = Connection()
data = conn.execute_rpc(rpc)
reply = fromstring(reply)
data = conn.get_capabilities()
json.loads(data)
        conn.load_configuration(config=['set system ntp server 1.1.1.1'], action='set', format='text')
"""
def __init__(self, connection):
self._connection = connection
self.m = self._connection._manager
@ensure_connected
def rpc(self, name):
"""RPC to be execute on remote device
:name: Name of rpc in string format"""
try:
obj = to_ele(to_bytes(name, errors='surrogate_or_strict'))
resp = self.m.rpc(obj)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
except RPCError as exc:
msg = exc.data_xml if hasattr(exc, 'data_xml') else exc.xml
raise Exception(to_xml(msg))
@ensure_connected
def get_config(self, *args, **kwargs):
"""Retrieve all or part of a specified configuration.
:source: name of the configuration datastore being queried
:filter: specifies the portion of the configuration to retrieve
(by default entire configuration is retrieved)"""
resp = self.m.get_config(*args, **kwargs)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
@ensure_connected
def get(self, *args, **kwargs):
"""Retrieve running configuration and device state information.
*filter* specifies the portion of the configuration to retrieve
(by default entire configuration is retrieved)
"""
resp = self.m.get(*args, **kwargs)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
@ensure_connected
def edit_config(self, *args, **kwargs):
"""Loads all or part of the specified *config* to the *target* configuration datastore.
:target: is the name of the configuration datastore being edited
:config: is the configuration, which must be rooted in the `config` element.
It can be specified either as a string or an :class:`~xml.etree.ElementTree.Element`.
:default_operation: if specified must be one of { `"merge"`, `"replace"`, or `"none"` }
:test_option: if specified must be one of { `"test_then_set"`, `"set"` }
:error_option: if specified must be one of { `"stop-on-error"`, `"continue-on-error"`, `"rollback-on-error"` }
The `"rollback-on-error"` *error_option* depends on the `:rollback-on-error` capability.
"""
resp = self.m.edit_config(*args, **kwargs)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
@ensure_connected
def validate(self, *args, **kwargs):
"""Validate the contents of the specified configuration.
:source: is the name of the configuration datastore being validated or `config`
element containing the configuration subtree to be validated
"""
resp = self.m.validate(*args, **kwargs)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
@ensure_connected
def copy_config(self, *args, **kwargs):
"""Create or replace an entire configuration datastore with the contents of another complete
configuration datastore.
:source: is the name of the configuration datastore to use as the source of the
copy operation or `config` element containing the configuration subtree to copy
:target: is the name of the configuration datastore to use as the destination of the copy operation"""
resp = self.m.copy_config(*args, **kwargs)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
@ensure_connected
def lock(self, *args, **kwargs):
"""Allows the client to lock the configuration system of a device.
*target* is the name of the configuration datastore to lock
"""
resp = self.m.lock(*args, **kwargs)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
@ensure_connected
def unlock(self, *args, **kwargs):
"""Release a configuration lock, previously obtained with the lock operation.
:target: is the name of the configuration datastore to unlock
"""
resp = self.m.unlock(*args, **kwargs)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
@ensure_connected
def discard_changes(self, *args, **kwargs):
"""Revert the candidate configuration to the currently running configuration.
Any uncommitted changes are discarded."""
resp = self.m.discard_changes(*args, **kwargs)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
@ensure_connected
def commit(self, *args, **kwargs):
"""Commit the candidate configuration as the device's new current configuration.
Depends on the `:candidate` capability.
A confirmed commit (i.e. if *confirmed* is `True`) is reverted if there is no
followup commit within the *timeout* interval. If no timeout is specified the
confirm timeout defaults to 600 seconds (10 minutes).
A confirming commit may have the *confirmed* parameter but this is not required.
Depends on the `:confirmed-commit` capability.
:confirmed: whether this is a confirmed commit
:timeout: specifies the confirm timeout in seconds
"""
resp = self.m.commit(*args, **kwargs)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
@ensure_connected
def validate(self, *args, **kwargs):
"""Validate the contents of the specified configuration.
:source: name of configuration data store"""
resp = self.m.validate(*args, **kwargs)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
@ensure_connected
def get_schema(self, *args, **kwargs):
"""Retrieves the required schema from the device
"""
resp = self.m.get_schema(*args, **kwargs)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
@ensure_connected
def locked(self, *args, **kwargs):
resp = self.m.locked(*args, **kwargs)
return resp.data_xml if hasattr(resp, 'data_xml') else resp.xml
@abstractmethod
def get_capabilities(self):
"""Retrieves device information and supported
rpc methods by device platform and return result
as a string
"""
pass
@staticmethod
def guess_network_os(obj):
"""Identifies the operating system of
network device.
"""
pass
def get_base_rpc(self):
"""Returns list of base rpc method supported by remote device"""
return ['get_config', 'edit_config', 'get_capabilities', 'get']
def put_file(self, source, destination):
"""Copies file over scp to remote device"""
pass
def fetch_file(self, source, destination):
"""Fetch file over scp from remote device"""
pass
# TODO Restore .xml, when ncclient supports it for all platforms
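# A minimal illustrative sketch of a platform plugin built on top of NetconfBase.
# The class name and the 'default' network_os guess are assumptions made for this
# example only; real platform plugins (junos, iosxr, ...) live in their own
# modules and expose much richer capability information.
import json
class ExampleNetconf(NetconfBase):
    def get_capabilities(self):
        result = dict()
        result['rpc'] = self.get_base_rpc()
        result['network_api'] = 'netconf'
        result['server_capabilities'] = [c for c in self.m.server_capabilities]
        return json.dumps(result)
    @staticmethod
    def guess_network_os(obj):
        # A real implementation would inspect the SSH transport or hello message.
        return 'default'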
| gpl-3.0 | -9,121,203,277,571,318,000 | 42.241935 | 122 | 0.654233 | false |
BtbN/xbmc | lib/gtest/test/gtest_output_test.py | 1733 | 12005 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
IS_WINDOWS = os.name == 'nt'
# TODO([email protected]): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
'--gtest_print_time',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
{}, [PROGRAM_PATH,
'--gtest_also_run_disabled_tests',
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
{'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
[PROGRAM_PATH,
'--gtest_internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveLocations(test_output):
"""Removes all file location info from a Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE_NAME:#: '.
"""
return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ', r'\1:#: ', test_output)
def RemoveStackTraceDetails(output):
"""Removes all stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n',
'Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
"""Removes all traces of stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output)
def RemoveTime(output):
"""Removes all time information from a Google Test program's output."""
return re.sub(r'\(\d+ ms', '(? ms', output)
def RemoveTypeInfoDetails(test_output):
"""Removes compiler-specific type info from Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with type information normalized to canonical form.
"""
# some compilers output the name of type 'unsigned int' as 'unsigned'
return re.sub(r'unsigned int', 'unsigned', test_output)
def NormalizeToCurrentPlatform(test_output):
"""Normalizes platform specific output details for easier comparison."""
if IS_WINDOWS:
# Removes the color information that is not present on Windows.
test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
# Changes failure message headers into the Windows format.
test_output = re.sub(r': Failure\n', r': error: ', test_output)
# Changes file(line_number) to file:line_number.
test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
return test_output
def RemoveTestCounts(output):
"""Removes test counts from a Google Test program's output."""
output = re.sub(r'\d+ tests?, listed below',
'? tests, listed below', output)
output = re.sub(r'\d+ FAILED TESTS',
'? FAILED TESTS', output)
output = re.sub(r'\d+ tests? from \d+ test cases?',
'? tests from ? test cases', output)
output = re.sub(r'\d+ tests? from ([a-zA-Z_])',
r'? tests from \1', output)
return re.sub(r'\d+ tests?\.', '? tests.', output)
def RemoveMatchingTests(test_output, pattern):
"""Removes output of specified tests from a Google Test program's output.
This function strips not only the beginning and the end of a test but also
all output in between.
Args:
test_output: A string containing the test output.
pattern: A regex string that matches names of test cases or
tests to remove.
Returns:
Contents of test_output with tests whose names match pattern removed.
"""
test_output = re.sub(
r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % (
pattern, pattern),
'',
test_output)
return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
"""Normalizes output (the output of gtest_output_test_.exe)."""
output = ToUnixLineEnding(output)
output = RemoveLocations(output)
output = RemoveStackTraceDetails(output)
output = RemoveTime(output)
return output
def GetShellCommandOutput(env_cmd):
"""Runs a command in a sub-process, and returns its output in a string.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
Returns:
A string with the command's combined standard and diagnostic output.
"""
# Spawns cmd in a sub-process, and gets its standard I/O file objects.
# Set and save the environment properly.
environ = os.environ.copy()
environ.update(env_cmd[0])
p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)
return p.output
def GetCommandOutput(env_cmd):
"""Runs a command and returns its output with all file location
info stripped off.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
"""
# Disables exception pop-ups on Windows.
environ, cmdline = env_cmd
environ = dict(environ) # Ensures we are modifying a copy.
environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
"""Returns concatenated output from several representative commands."""
return (GetCommandOutput(COMMAND_WITH_COLOR) +
GetCommandOutput(COMMAND_WITH_TIME) +
GetCommandOutput(COMMAND_WITH_DISABLED) +
GetCommandOutput(COMMAND_WITH_SHARDING))
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = False
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
SUPPORTS_TYPED_TESTS and
SUPPORTS_THREADS)
class GTestOutputTest(gtest_test_utils.TestCase):
def RemoveUnsupportedTests(self, test_output):
if not SUPPORTS_DEATH_TESTS:
test_output = RemoveMatchingTests(test_output, 'DeathTest')
if not SUPPORTS_TYPED_TESTS:
test_output = RemoveMatchingTests(test_output, 'TypedTest')
test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
if not SUPPORTS_THREADS:
test_output = RemoveMatchingTests(test_output,
'ExpectFailureWithThreadsTest')
test_output = RemoveMatchingTests(test_output,
'ScopedFakeTestPartResultReporterTest')
test_output = RemoveMatchingTests(test_output,
'WorksConcurrently')
if not SUPPORTS_STACK_TRACES:
test_output = RemoveStackTraces(test_output)
return test_output
def testOutput(self):
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'rb')
    # A mis-configured source control system can cause \r to appear in EOL
# sequences when we read the golden file irrespective of an operating
# system used. Therefore, we need to strip those \r's from newlines
# unconditionally.
golden = ToUnixLineEnding(golden_file.read())
golden_file.close()
# We want the test to pass regardless of certain features being
# supported or not.
# We still have to remove type name specifics in all cases.
normalized_actual = RemoveTypeInfoDetails(output)
normalized_golden = RemoveTypeInfoDetails(golden)
if CAN_GENERATE_GOLDEN_FILE:
self.assertEqual(normalized_golden, normalized_actual)
else:
normalized_actual = NormalizeToCurrentPlatform(
RemoveTestCounts(normalized_actual))
normalized_golden = NormalizeToCurrentPlatform(
RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
# This code is very handy when debugging golden file differences:
if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_actual.txt'), 'wb').write(
normalized_actual)
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_golden.txt'), 'wb').write(
normalized_golden)
self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
if CAN_GENERATE_GOLDEN_FILE:
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'wb')
golden_file.write(output)
golden_file.close()
else:
message = (
"""Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")
sys.stderr.write(message)
sys.exit(1)
else:
gtest_test_utils.Main()
| gpl-2.0 | -6,145,831,389,545,537,000 | 34.835821 | 79 | 0.675635 | false |
skulbrane/metagoofil | hachoir_parser/program/exe_res.py | 95 | 15292 | """
Parser for resources of Microsoft Windows Portable Executable (PE) files.
Documentation:
- Wine project
VS_FIXEDFILEINFO structure, file include/winver.h
Author: Victor Stinner
Creation date: 2007-01-19
"""
from hachoir_core.field import (FieldSet, ParserError, Enum,
Bit, Bits, SeekableFieldSet,
UInt16, UInt32, TimestampUnix32,
RawBytes, PaddingBytes, NullBytes, NullBits,
CString, String)
from hachoir_core.text_handler import textHandler, filesizeHandler, hexadecimal
from hachoir_core.tools import createDict, paddingSize, alignValue, makePrintable
from hachoir_core.error import HACHOIR_ERRORS
from hachoir_parser.common.win32 import BitmapInfoHeader
MAX_DEPTH = 5
MAX_INDEX_PER_HEADER = 300
MAX_NAME_PER_HEADER = MAX_INDEX_PER_HEADER
class Version(FieldSet):
static_size = 32
def createFields(self):
yield textHandler(UInt16(self, "minor", "Minor version number"), hexadecimal)
yield textHandler(UInt16(self, "major", "Major version number"), hexadecimal)
def createValue(self):
return self["major"].value + float(self["minor"].value) / 10000
MAJOR_OS_NAME = {
1: "DOS",
2: "OS/2 16-bit",
3: "OS/2 32-bit",
4: "Windows NT",
}
MINOR_OS_BASE = 0
MINOR_OS_NAME = {
0: "Base",
1: "Windows 16-bit",
2: "Presentation Manager 16-bit",
3: "Presentation Manager 32-bit",
4: "Windows 32-bit",
}
FILETYPE_DRIVER = 3
FILETYPE_FONT = 4
FILETYPE_NAME = {
1: "Application",
2: "DLL",
3: "Driver",
4: "Font",
5: "VXD",
7: "Static library",
}
DRIVER_SUBTYPE_NAME = {
1: "Printer",
2: "Keyboard",
3: "Language",
4: "Display",
5: "Mouse",
6: "Network",
7: "System",
8: "Installable",
9: "Sound",
10: "Communications",
}
FONT_SUBTYPE_NAME = {
1: "Raster",
2: "Vector",
3: "TrueType",
}
class VersionInfoBinary(FieldSet):
def createFields(self):
yield textHandler(UInt32(self, "magic", "File information magic (0xFEEF04BD)"), hexadecimal)
if self["magic"].value != 0xFEEF04BD:
raise ParserError("EXE resource: invalid file info magic")
yield Version(self, "struct_ver", "Structure version (1.0)")
yield Version(self, "file_ver_ms", "File version MS")
yield Version(self, "file_ver_ls", "File version LS")
yield Version(self, "product_ver_ms", "Product version MS")
yield Version(self, "product_ver_ls", "Product version LS")
yield textHandler(UInt32(self, "file_flags_mask"), hexadecimal)
yield Bit(self, "debug")
yield Bit(self, "prerelease")
yield Bit(self, "patched")
yield Bit(self, "private_build")
yield Bit(self, "info_inferred")
yield Bit(self, "special_build")
yield NullBits(self, "reserved", 26)
yield Enum(textHandler(UInt16(self, "file_os_major"), hexadecimal), MAJOR_OS_NAME)
yield Enum(textHandler(UInt16(self, "file_os_minor"), hexadecimal), MINOR_OS_NAME)
yield Enum(textHandler(UInt32(self, "file_type"), hexadecimal), FILETYPE_NAME)
field = textHandler(UInt32(self, "file_subfile"), hexadecimal)
if field.value == FILETYPE_DRIVER:
field = Enum(field, DRIVER_SUBTYPE_NAME)
elif field.value == FILETYPE_FONT:
field = Enum(field, FONT_SUBTYPE_NAME)
yield field
yield TimestampUnix32(self, "date_ms")
yield TimestampUnix32(self, "date_ls")
class VersionInfoNode(FieldSet):
TYPE_STRING = 1
TYPE_NAME = {
0: "binary",
1: "string",
}
def __init__(self, parent, name, is_32bit=True):
FieldSet.__init__(self, parent, name)
self._size = alignValue(self["size"].value, 4) * 8
self.is_32bit = is_32bit
def createFields(self):
yield UInt16(self, "size", "Node size (in bytes)")
yield UInt16(self, "data_size")
yield Enum(UInt16(self, "type"), self.TYPE_NAME)
yield CString(self, "name", charset="UTF-16-LE")
size = paddingSize(self.current_size//8, 4)
if size:
yield NullBytes(self, "padding[]", size)
size = self["data_size"].value
if size:
if self["type"].value == self.TYPE_STRING:
if self.is_32bit:
size *= 2
yield String(self, "value", size, charset="UTF-16-LE", truncate="\0")
elif self["name"].value == "VS_VERSION_INFO":
yield VersionInfoBinary(self, "value", size=size*8)
if self["value/file_flags_mask"].value == 0:
self.is_32bit = False
else:
yield RawBytes(self, "value", size)
while 12 <= (self.size - self.current_size) // 8:
yield VersionInfoNode(self, "node[]", self.is_32bit)
size = (self.size - self.current_size) // 8
if size:
yield NullBytes(self, "padding[]", size)
def createDescription(self):
text = "Version info node: %s" % self["name"].value
if self["type"].value == self.TYPE_STRING and "value" in self:
text += "=%s" % self["value"].value
return text
def parseVersionInfo(parent):
yield VersionInfoNode(parent, "node[]")
def parseIcon(parent):
yield BitmapInfoHeader(parent, "bmp_header")
size = (parent.size - parent.current_size) // 8
if size:
yield RawBytes(parent, "raw", size)
class WindowsString(FieldSet):
def createFields(self):
yield UInt16(self, "length", "Number of 16-bit characters")
size = self["length"].value * 2
if size:
yield String(self, "text", size, charset="UTF-16-LE")
def createValue(self):
if "text" in self:
return self["text"].value
else:
return u""
def createDisplay(self):
return makePrintable(self.value, "UTF-8", to_unicode=True, quote='"')
def parseStringTable(parent):
while not parent.eof:
yield WindowsString(parent, "string[]")
RESOURCE_TYPE = {
1: ("cursor[]", "Cursor", None),
2: ("bitmap[]", "Bitmap", None),
3: ("icon[]", "Icon", parseIcon),
4: ("menu[]", "Menu", None),
5: ("dialog[]", "Dialog", None),
6: ("string_table[]", "String table", parseStringTable),
7: ("font_dir[]", "Font directory", None),
8: ("font[]", "Font", None),
9: ("accelerators[]", "Accelerators", None),
10: ("raw_res[]", "Unformatted resource data", None),
11: ("message_table[]", "Message table", None),
12: ("group_cursor[]", "Group cursor", None),
14: ("group_icon[]", "Group icon", None),
16: ("version_info", "Version information", parseVersionInfo),
}
class Entry(FieldSet):
static_size = 16*8
def __init__(self, parent, name, inode=None):
FieldSet.__init__(self, parent, name)
self.inode = inode
def createFields(self):
yield textHandler(UInt32(self, "rva"), hexadecimal)
yield filesizeHandler(UInt32(self, "size"))
yield UInt32(self, "codepage")
yield NullBytes(self, "reserved", 4)
def createDescription(self):
return "Entry #%u: offset=%s size=%s" % (
self.inode["offset"].value, self["rva"].display, self["size"].display)
class NameOffset(FieldSet):
def createFields(self):
yield UInt32(self, "name")
yield Bits(self, "offset", 31)
yield Bit(self, "is_name")
class IndexOffset(FieldSet):
TYPE_DESC = createDict(RESOURCE_TYPE, 1)
def __init__(self, parent, name, res_type=None):
FieldSet.__init__(self, parent, name)
self.res_type = res_type
def createFields(self):
yield Enum(UInt32(self, "type"), self.TYPE_DESC)
yield Bits(self, "offset", 31)
yield Bit(self, "is_subdir")
def createDescription(self):
if self["is_subdir"].value:
return "Sub-directory: %s at %s" % (self["type"].display, self["offset"].value)
else:
return "Index: ID %s at %s" % (self["type"].display, self["offset"].value)
class ResourceContent(FieldSet):
def __init__(self, parent, name, entry, size=None):
FieldSet.__init__(self, parent, name, size=entry["size"].value*8)
self.entry = entry
res_type = self.getResType()
if res_type in RESOURCE_TYPE:
self._name, description, self._parser = RESOURCE_TYPE[res_type]
else:
self._parser = None
def getResID(self):
return self.entry.inode["offset"].value
def getResType(self):
return self.entry.inode.res_type
def createFields(self):
if self._parser:
for field in self._parser(self):
yield field
else:
yield RawBytes(self, "content", self.size//8)
def createDescription(self):
return "Resource #%u content: type=%s" % (
self.getResID(), self.getResType())
class Header(FieldSet):
static_size = 16*8
def createFields(self):
yield NullBytes(self, "options", 4)
yield TimestampUnix32(self, "creation_date")
yield UInt16(self, "maj_ver", "Major version")
yield UInt16(self, "min_ver", "Minor version")
yield UInt16(self, "nb_name", "Number of named entries")
yield UInt16(self, "nb_index", "Number of indexed entries")
def createDescription(self):
text = "Resource header"
info = []
if self["nb_name"].value:
info.append("%u name" % self["nb_name"].value)
if self["nb_index"].value:
info.append("%u index" % self["nb_index"].value)
if self["creation_date"].value:
info.append(self["creation_date"].display)
if info:
return "%s: %s" % (text, ", ".join(info))
else:
return text
class Name(FieldSet):
def createFields(self):
yield UInt16(self, "length")
size = min(self["length"].value, 255)
if size:
yield String(self, "name", size, charset="UTF-16LE")
class Directory(FieldSet):
def __init__(self, parent, name, res_type=None):
FieldSet.__init__(self, parent, name)
nb_entries = self["header/nb_name"].value + self["header/nb_index"].value
self._size = Header.static_size + nb_entries * 64
self.res_type = res_type
def createFields(self):
yield Header(self, "header")
if MAX_NAME_PER_HEADER < self["header/nb_name"].value:
raise ParserError("EXE resource: invalid number of name (%s)"
% self["header/nb_name"].value)
if MAX_INDEX_PER_HEADER < self["header/nb_index"].value:
raise ParserError("EXE resource: invalid number of index (%s)"
% self["header/nb_index"].value)
hdr = self["header"]
for index in xrange(hdr["nb_name"].value):
yield NameOffset(self, "name[]")
for index in xrange(hdr["nb_index"].value):
yield IndexOffset(self, "index[]", self.res_type)
def createDescription(self):
return self["header"].description
class PE_Resource(SeekableFieldSet):
def __init__(self, parent, name, section, size):
SeekableFieldSet.__init__(self, parent, name, size=size)
self.section = section
def parseSub(self, directory, name, depth):
indexes = []
for index in directory.array("index"):
if index["is_subdir"].value:
indexes.append(index)
#indexes.sort(key=lambda index: index["offset"].value)
for index in indexes:
self.seekByte(index["offset"].value)
if depth == 1:
res_type = index["type"].value
else:
res_type = directory.res_type
yield Directory(self, name, res_type)
def createFields(self):
# Parse directories
depth = 0
subdir = Directory(self, "root")
yield subdir
subdirs = [subdir]
alldirs = [subdir]
while subdirs:
depth += 1
if MAX_DEPTH < depth:
self.error("EXE resource: depth too high (%s), stop parsing directories" % depth)
break
newsubdirs = []
for index, subdir in enumerate(subdirs):
name = "directory[%u][%u][]" % (depth, index)
try:
for field in self.parseSub(subdir, name, depth):
if field.__class__ == Directory:
newsubdirs.append(field)
yield field
except HACHOIR_ERRORS, err:
self.error("Unable to create directory %s: %s" % (name, err))
subdirs = newsubdirs
alldirs.extend(subdirs)
# Create resource list
resources = []
for directory in alldirs:
for index in directory.array("index"):
if not index["is_subdir"].value:
resources.append(index)
# Parse entries
entries = []
for resource in resources:
offset = resource["offset"].value
if offset is None:
continue
self.seekByte(offset)
entry = Entry(self, "entry[]", inode=resource)
yield entry
entries.append(entry)
entries.sort(key=lambda entry: entry["rva"].value)
# Parse resource content
for entry in entries:
try:
offset = self.section.rva2file(entry["rva"].value)
padding = self.seekByte(offset, relative=False)
if padding:
yield padding
yield ResourceContent(self, "content[]", entry)
except HACHOIR_ERRORS, err:
self.warning("Error when parsing entry %s: %s" % (entry.path, err))
size = (self.size - self.current_size) // 8
if size:
yield PaddingBytes(self, "padding_end", size)
class NE_VersionInfoNode(FieldSet):
TYPE_STRING = 1
TYPE_NAME = {
0: "binary",
1: "string",
}
def __init__(self, parent, name):
FieldSet.__init__(self, parent, name)
self._size = alignValue(self["size"].value, 4) * 8
def createFields(self):
yield UInt16(self, "size", "Node size (in bytes)")
yield UInt16(self, "data_size")
yield CString(self, "name", charset="ISO-8859-1")
size = paddingSize(self.current_size//8, 4)
if size:
yield NullBytes(self, "padding[]", size)
size = self["data_size"].value
if size:
if self["name"].value == "VS_VERSION_INFO":
yield VersionInfoBinary(self, "value", size=size*8)
else:
yield String(self, "value", size, charset="ISO-8859-1")
while 12 <= (self.size - self.current_size) // 8:
yield NE_VersionInfoNode(self, "node[]")
size = (self.size - self.current_size) // 8
if size:
yield NullBytes(self, "padding[]", size)
def createDescription(self):
text = "Version info node: %s" % self["name"].value
# if self["type"].value == self.TYPE_STRING and "value" in self:
# text += "=%s" % self["value"].value
return text
| gpl-2.0 | 6,479,351,689,779,782,000 | 33.364045 | 100 | 0.576053 | false |
chdecultot/erpnext | erpnext/hr/doctype/employee_attendance_tool/employee_attendance_tool.py | 7 | 1956 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe.model.document import Document
class EmployeeAttendanceTool(Document):
pass
@frappe.whitelist()
def get_employees(date, department = None, branch = None, company = None):
attendance_not_marked = []
attendance_marked = []
filters = {"status": "Active", "date_of_joining": ["<=", date]}
if department != "All":
filters["department"] = department
if branch != "All":
filters["branch"] = branch
if company != "All":
filters["company"] = company
employee_list = frappe.get_list("Employee", fields=["employee", "employee_name"], filters=filters, order_by="employee_name")
marked_employee = {}
for emp in frappe.get_list("Attendance", fields=["employee", "status"],
filters={"attendance_date": date}):
marked_employee[emp['employee']] = emp['status']
for employee in employee_list:
employee['status'] = marked_employee.get(employee['employee'])
if employee['employee'] not in marked_employee:
attendance_not_marked.append(employee)
else:
attendance_marked.append(employee)
return {
"marked": attendance_marked,
"unmarked": attendance_not_marked
}
@frappe.whitelist()
def mark_employee_attendance(employee_list, status, date, leave_type=None, company=None):
employee_list = json.loads(employee_list)
for employee in employee_list:
attendance = frappe.new_doc("Attendance")
attendance.employee = employee['employee']
attendance.employee_name = employee['employee_name']
attendance.attendance_date = date
attendance.status = status
if status == "On Leave" and leave_type:
attendance.leave_type = leave_type
if company:
attendance.company = company
else:
attendance.company = frappe.db.get_value("Employee", employee['employee'], "Company")
attendance.submit()
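# Rough usage sketch; the date, the "All" filter values and the dotted method path
# below are illustrative assumptions, not fixed values. From a bench console the
# whitelisted helpers above can be called directly:
#
#     result = get_employees("2015-06-01", department="All", branch="All", company="All")
#     mark_employee_attendance(json.dumps(result["unmarked"]), "Present", "2015-06-01")
#
# Over HTTP the same helpers are reachable, following the usual Frappe convention, at
# /api/method/erpnext.hr.doctype.employee_attendance_tool.employee_attendance_tool.get_employees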
| gpl-3.0 | -8,111,983,554,132,141,000 | 31.6 | 125 | 0.714724 | false |
BT-ojossen/odoo | addons/crm_helpdesk/__openerp__.py | 260 | 1956 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Helpdesk',
'category': 'Customer Relationship Management',
'version': '1.0',
'description': """
Helpdesk Management.
====================
Like claim records and claim processing, Helpdesk and Support are good tools
for tracking your interventions. This menu is better suited to oral communication,
which is not necessarily related to a claim. Select a customer, add notes
and categorize your interventions with a channel and a priority level.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com',
'depends': ['crm'],
'data': [
'crm_helpdesk_view.xml',
'crm_helpdesk_menu.xml',
'security/ir.model.access.csv',
'report/crm_helpdesk_report_view.xml',
'crm_helpdesk_data.xml',
],
'demo': ['crm_helpdesk_demo.xml'],
'test': ['test/process/help-desk.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -5,126,758,670,057,373,000 | 36.615385 | 78 | 0.618098 | false |
jaimahajan1997/sympy | sympy/physics/quantum/density.py | 82 | 9834 | from __future__ import print_function, division
from itertools import product
from sympy import Tuple, Add, Mul, Matrix, log, expand, Rational
from sympy.core.trace import Tr
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.operator import HermitianOperator
from sympy.physics.quantum.represent import represent
from sympy.physics.quantum.matrixutils import numpy_ndarray, scipy_sparse_matrix, to_numpy
from sympy.physics.quantum.tensorproduct import TensorProduct, tensor_product_simp
class Density(HermitianOperator):
"""Density operator for representing mixed states.
TODO: Density operator support for Qubits
Parameters
==========
values : tuples/lists
Each tuple/list should be of form (state, prob) or [state,prob]
Examples
========
Create a density operator with 2 states represented by Kets.
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d
'Density'((|0>, 0.5),(|1>, 0.5))
"""
@classmethod
def _eval_args(cls, args):
# call this to qsympify the args
args = super(Density, cls)._eval_args(args)
for arg in args:
# Check if arg is a tuple
if not (isinstance(arg, Tuple) and
len(arg) == 2):
raise ValueError("Each argument should be of form [state,prob]"
" or ( state, prob )")
return args
def states(self):
"""Return list of all states.
Examples
========
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d.states()
(|0>, |1>)
"""
return Tuple(*[arg[0] for arg in self.args])
def probs(self):
"""Return list of all probabilities.
Examples
========
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d.probs()
(0.5, 0.5)
"""
return Tuple(*[arg[1] for arg in self.args])
def get_state(self, index):
"""Return specfic state by index.
Parameters
==========
index : index of state to be returned
Examples
========
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d.states()[1]
|1>
"""
state = self.args[index][0]
return state
def get_prob(self, index):
"""Return probability of specific state by index.
Parameters
===========
index : index of states whose probability is returned.
Examples
========
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d.probs()[1]
0.500000000000000
"""
prob = self.args[index][1]
return prob
def apply_op(self, op):
"""op will operate on each individual state.
Parameters
==========
op : Operator
Examples
========
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> from sympy.physics.quantum.operator import Operator
>>> A = Operator('A')
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d.apply_op(A)
'Density'((A*|0>, 0.5),(A*|1>, 0.5))
"""
new_args = [(op*state, prob) for (state, prob) in self.args]
return Density(*new_args)
def doit(self, **hints):
"""Expand the density operator into an outer product format.
Examples
========
>>> from sympy.physics.quantum.state import Ket
>>> from sympy.physics.quantum.density import Density
>>> from sympy.physics.quantum.operator import Operator
>>> A = Operator('A')
>>> d = Density([Ket(0), 0.5], [Ket(1),0.5])
>>> d.doit()
0.5*|0><0| + 0.5*|1><1|
"""
terms = []
for (state, prob) in self.args:
state = state.expand() # needed to break up (a+b)*c
if (isinstance(state, Add)):
for arg in product(state.args, repeat=2):
terms.append(prob *
self._generate_outer_prod(arg[0], arg[1]))
else:
terms.append(prob *
self._generate_outer_prod(state, state))
return Add(*terms)
def _generate_outer_prod(self, arg1, arg2):
c_part1, nc_part1 = arg1.args_cnc()
c_part2, nc_part2 = arg2.args_cnc()
if ( len(nc_part1) == 0 or
len(nc_part2) == 0 ):
            raise ValueError('At least one pair of'
                             ' non-commutative instances is required'
                             ' for the outer product.')
# Muls of Tensor Products should be expanded
# before this function is called
if (isinstance(nc_part1[0], TensorProduct) and
len(nc_part1) == 1 and len(nc_part2) == 1):
op = tensor_product_simp(nc_part1[0] * Dagger(nc_part2[0]))
else:
op = Mul(*nc_part1) * Dagger(Mul(*nc_part2))
return Mul(*c_part1)*Mul(*c_part2)*op
def _represent(self, **options):
return represent(self.doit(), **options)
def _print_operator_name_latex(self, printer, *args):
return printer._print(r'\rho', *args)
def _print_operator_name_pretty(self, printer, *args):
        return prettyForm(u'\N{GREEK SMALL LETTER RHO}')
def _eval_trace(self, **kwargs):
indices = kwargs.get('indices', [])
return Tr(self.doit(), indices).doit()
def entropy(self):
""" Compute the entropy of a density matrix.
Refer to density.entropy() method for examples.
"""
return entropy(self)
def entropy(density):
"""Compute the entropy of a matrix/density object.
This computes -Tr(density*ln(density)) using the eigenvalue decomposition
of density, which is given as either a Density instance or a matrix
(numpy.ndarray, sympy.Matrix or scipy.sparse).
Parameters
==========
density : density matrix of type Density, sympy matrix,
scipy.sparse or numpy.ndarray
Examples
========
>>> from sympy.physics.quantum.density import Density, entropy
>>> from sympy.physics.quantum.represent import represent
>>> from sympy.physics.quantum.matrixutils import scipy_sparse_matrix
>>> from sympy.physics.quantum.spin import JzKet, Jz
>>> from sympy import S, log
>>> up = JzKet(S(1)/2,S(1)/2)
>>> down = JzKet(S(1)/2,-S(1)/2)
>>> d = Density((up,0.5),(down,0.5))
>>> entropy(d)
log(2)/2
"""
if isinstance(density, Density):
density = represent(density) # represent in Matrix
if isinstance(density, scipy_sparse_matrix):
density = to_numpy(density)
if isinstance(density, Matrix):
eigvals = density.eigenvals().keys()
return expand(-sum(e*log(e) for e in eigvals))
elif isinstance(density, numpy_ndarray):
import numpy as np
eigvals = np.linalg.eigvals(density)
return -np.sum(eigvals*np.log(eigvals))
else:
raise ValueError(
"numpy.ndarray, scipy.sparse or sympy matrix expected")
def fidelity(state1, state2):
""" Computes the fidelity [1]_ between two quantum states
    The arguments provided to this function should be square matrices or Density
    objects. If a square matrix is given, it is assumed to be diagonalizable.
    Parameters
    ==========
state1, state2 : a density matrix or Matrix
Examples
========
>>> from sympy import S, sqrt
>>> from sympy.physics.quantum.dagger import Dagger
>>> from sympy.physics.quantum.spin import JzKet
>>> from sympy.physics.quantum.density import Density, fidelity
>>> from sympy.physics.quantum.represent import represent
>>>
>>> up = JzKet(S(1)/2,S(1)/2)
>>> down = JzKet(S(1)/2,-S(1)/2)
>>> amp = 1/sqrt(2)
>>> updown = (amp * up) + (amp * down)
>>>
>>> # represent turns Kets into matrices
>>> up_dm = represent(up * Dagger(up))
>>> down_dm = represent(down * Dagger(down))
>>> updown_dm = represent(updown * Dagger(updown))
>>>
>>> fidelity(up_dm, up_dm)
1
>>> fidelity(up_dm, down_dm) #orthogonal states
0
>>> fidelity(up_dm, updown_dm).evalf().round(3)
0.707
References
==========
.. [1] http://en.wikipedia.org/wiki/Fidelity_of_quantum_states
"""
state1 = represent(state1) if isinstance(state1, Density) else state1
state2 = represent(state2) if isinstance(state2, Density) else state2
if (not isinstance(state1, Matrix) or
not isinstance(state2, Matrix)):
raise ValueError("state1 and state2 must be of type Density or Matrix "
"received type=%s for state1 and type=%s for state2" %
(type(state1), type(state2)))
if ( state1.shape != state2.shape and state1.is_square):
raise ValueError("The dimensions of both args should be equal and the "
"matrix obtained should be a square matrix")
sqrt_state1 = state1**Rational(1, 2)
return Tr((sqrt_state1 * state2 * sqrt_state1)**Rational(1, 2)).doit()
| bsd-3-clause | 8,492,162,776,108,616,000 | 29.540373 | 90 | 0.575453 | false |
KarlTDebiec/westpa | lib/examples/wca-dimer_openmm/bruteforce/wcadimer.py | 12 | 10747 | #!/usr/local/bin/env python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
WCA fluid and WCA dimer systems.
DESCRIPTION
COPYRIGHT
@author John D. Chodera <[email protected]>
All code in this repository is released under the GNU General Public License.
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
TODO
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import numpy
import simtk
import simtk.unit as units
import simtk.openmm as openmm
#=============================================================================================
# CONSTANTS
#=============================================================================================
kB = units.BOLTZMANN_CONSTANT_kB * units.AVOGADRO_CONSTANT_NA
#=============================================================================================
# DEFAULT PARAMETERS
#=============================================================================================
natoms = 216 # number of particles
# WCA fluid parameters (argon).
mass = 39.9 * units.amu # reference mass
sigma = 3.4 * units.angstrom # reference lengthscale
epsilon = 120.0 * units.kelvin * kB # reference energy
r_WCA = 2.**(1./6.) * sigma # interaction truncation range
tau = units.sqrt(sigma**2 * mass / epsilon) # characteristic timescale
# Simulation conditions.
temperature = 0.824 / (kB / epsilon) # temperature
kT = kB * temperature # thermal energy
beta = 1.0 / kT # inverse temperature
density = 0.96 / sigma**3 # default density
stable_timestep = 0.001 * tau # stable timestep
collision_rate = 1 / tau # collision rate for Langevin interator
# Dimer potential parameters.
h = 5.0 * kT # barrier height
r0 = r_WCA # compact state distance
w = 0.5 * r_WCA # extended state distance is r0 + 2*w
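# Rough orders of magnitude for these argon-like parameters (approximate, for
# orientation only; nothing below depends on these numbers): with sigma = 0.34 nm,
# mass = 39.9 amu and epsilon = 120 K * kB, the characteristic time is
# tau = sqrt(sigma**2 * mass / epsilon) ~ 2.1 ps, so stable_timestep (0.001 tau) is
# roughly 2 fs and collision_rate (1/tau) is roughly 0.5 / ps. The reduced
# temperature of 0.824 corresponds to about 99 K.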
#=============================================================================================
# WCA Fluid
#=============================================================================================
def WCAFluid(N=natoms, density=density, mm=None, mass=mass, epsilon=epsilon, sigma=sigma):
"""
Create a Weeks-Chandler-Andersen system.
OPTIONAL ARGUMENTS
N (int) - total number of atoms (default: 150)
density (float) - N sigma^3 / V (default: 0.96)
sigma
epsilon
"""
# Choose OpenMM package.
if mm is None:
mm = openmm
# Create system
system = mm.System()
# Compute total system volume.
volume = N / density
# Make system cubic in dimension.
length = volume**(1.0/3.0)
# TODO: Can we change this to use tuples or 3x3 array?
a = units.Quantity(numpy.array([1.0, 0.0, 0.0], numpy.float32), units.nanometer) * length/units.nanometer
b = units.Quantity(numpy.array([0.0, 1.0, 0.0], numpy.float32), units.nanometer) * length/units.nanometer
c = units.Quantity(numpy.array([0.0, 0.0, 1.0], numpy.float32), units.nanometer) * length/units.nanometer
print "box edge length = %s" % str(length)
system.setDefaultPeriodicBoxVectors(a, b, c)
# Add particles to system.
for n in range(N):
system.addParticle(mass)
    # Create nonbonded force term implementing the WCA interaction
    # (Lennard-Jones truncated at its minimum and shifted to zero at the cutoff).
energy_expression = '4.0*epsilon*((sigma/r)^12 - (sigma/r)^6) + epsilon'
# Create force.
force = mm.CustomNonbondedForce(energy_expression)
# Set epsilon and sigma global parameters.
force.addGlobalParameter('epsilon', epsilon)
force.addGlobalParameter('sigma', sigma)
# Add particles
for n in range(N):
force.addParticle([])
    # Use a simple non-periodic cutoff at the WCA truncation distance.
force.setNonbondedMethod(mm.CustomNonbondedForce.CutoffNonPeriodic)
print "setting cutoff distance to %s" % str(r_WCA)
force.setCutoffDistance(r_WCA)
# Add nonbonded force term to the system.
system.addForce(force)
# Create initial coordinates using random positions.
coordinates = units.Quantity(numpy.random.rand(N,3), units.nanometer) * (length / units.nanometer)
# Return system and coordinates.
return [system, coordinates]
#=============================================================================================
# WCA dimer plus fluid
#=============================================================================================
def WCADimer(N=natoms, density=density, mm=None, mass=mass, epsilon=epsilon, sigma=sigma, h=h, r0=r0, w=w):
"""
Create a bistable bonded pair of particles (indices 0 and 1) optionally surrounded by a Weeks-Chandler-Andersen fluid.
The bistable potential has form
U(r) = h*(1-((r-r0-w)/w)^2)^2
where r0 is the compact state separation, r0+2w is the extended state separation, and h is the barrier height.
The WCA potential has form
    U(r) = 4 epsilon [ (sigma/r)^12 - (sigma/r)^6 ] + epsilon   (r < r*)
         = 0                                                    (r >= r*)
where r* = 2^(1/6) sigma.
OPTIONAL ARGUMENTS
    N (int) - total number of atoms (default: 216)
    density (float) - number density of particles (default: 0.96 / sigma**3)
    mass (simtk.unit.Quantity of mass) - particle mass (default: 39.9 amu)
    sigma (simtk.unit.Quantity of length) - Lennard-Jones sigma parameter (default: 3.4 angstrom)
    epsilon (simtk.unit.Quantity of energy) - Lennard-Jones well depth (default: (120.0 kelvin)*kB)
    h (simtk.unit.Quantity of energy) - bistable potential barrier height (default: 5.0 kT)
    r0 (simtk.unit.Quantity of length) - bistable potential compact state separation (default: 2^(1/6) sigma)
    w (simtk.unit.Quantity of length) - bistable potential extended state separation is r0+2*w (default: 0.5 * 2^(1/6) sigma)
"""
# Choose OpenMM package.
if mm is None:
mm = openmm
# Compute cutoff for WCA fluid.
r_WCA = 2.**(1./6.) * sigma # cutoff at minimum of potential
# Create system
system = mm.System()
# Compute total system volume.
volume = N / density
# Make system cubic in dimension.
length = volume**(1.0/3.0)
a = units.Quantity(numpy.array([1.0, 0.0, 0.0], numpy.float32), units.nanometer) * length/units.nanometer
b = units.Quantity(numpy.array([0.0, 1.0, 0.0], numpy.float32), units.nanometer) * length/units.nanometer
c = units.Quantity(numpy.array([0.0, 0.0, 1.0], numpy.float32), units.nanometer) * length/units.nanometer
print "box edge length = %s" % str(length)
system.setDefaultPeriodicBoxVectors(a, b, c)
# Add particles to system.
for n in range(N):
system.addParticle(mass)
    # WCA: Lennard-Jones truncated at its minimum and shifted so the potential is zero at the cutoff.
energy_expression = '4.0*epsilon*((sigma/r)^12 - (sigma/r)^6) + epsilon'
# Create force.
force = mm.CustomNonbondedForce(energy_expression)
# Set epsilon and sigma global parameters.
force.addGlobalParameter('epsilon', epsilon)
force.addGlobalParameter('sigma', sigma)
# Add particles
for n in range(N):
force.addParticle([])
# Add exclusion between bonded particles.
force.addExclusion(0,1)
    # Choose the cutoff method: periodic for the solvated system, non-periodic for the bare dimer.
if (N > 2):
force.setNonbondedMethod(mm.CustomNonbondedForce.CutoffPeriodic)
else:
force.setNonbondedMethod(mm.CustomNonbondedForce.CutoffNonPeriodic)
print "setting cutoff distance to %s" % str(r_WCA)
force.setCutoffDistance(r_WCA)
# Add nonbonded force term to the system.
system.addForce(force)
# Add dimer potential to first two particles.
dimer_force = openmm.CustomBondForce('h*(1-((r-r0-w)/w)^2)^2;')
dimer_force.addGlobalParameter('h', h) # barrier height
dimer_force.addGlobalParameter('r0', r0) # compact state separation
dimer_force.addGlobalParameter('w', w) # second minimum is at r0 + 2*w
dimer_force.addBond(0, 1, [])
system.addForce(dimer_force)
# Create initial coordinates using random positions.
coordinates = units.Quantity(numpy.random.rand(N,3), units.nanometer) * (length / units.nanometer)
# Reposition dimer particles at compact minimum.
coordinates[0,:] *= 0.0
coordinates[1,:] *= 0.0
coordinates[1,0] = r0
# Return system and coordinates.
return [system, coordinates]
#=============================================================================================
# WCA dimer in vacuum
#=============================================================================================
def WCADimerVacuum(mm=None, mass=mass, epsilon=epsilon, sigma=sigma, h=h, r0=r0, w=w):
"""
Create a bistable dimer.
    OPTIONAL ARGUMENTS
    mm - OpenMM implementation to use (default: simtk.openmm)
    mass, epsilon, sigma - particle parameters (defaults as in WCADimer)
    h, r0, w - bistable potential parameters (defaults as in WCADimer)
"""
# Choose OpenMM package.
if mm is None:
mm = openmm
# Create system
system = mm.System()
# Add particles to system.
for n in range(2):
system.addParticle(mass)
# Add dimer potential to first two particles.
dimer_force = openmm.CustomBondForce('h*(1-((r-r0-w)/w)^2)^2;')
dimer_force.addGlobalParameter('h', h) # barrier height
dimer_force.addGlobalParameter('r0', r0) # compact state separation
dimer_force.addGlobalParameter('w', w) # second minimum is at r0 + 2*w
dimer_force.addBond(0, 1, [])
system.addForce(dimer_force)
# Create initial coordinates using random positions.
coordinates = units.Quantity(numpy.zeros([2,3], numpy.float64), units.nanometer)
# Reposition dimer particles at compact minimum.
coordinates[0,:] *= 0.0
coordinates[1,:] *= 0.0
coordinates[1,0] = r0
# Return system and coordinates.
return [system, coordinates]
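#=============================================================================================
# EXAMPLE USAGE (illustrative sketch; not part of the original module)
#=============================================================================================
if __name__ == '__main__':
    # Minimal usage sketch assuming OpenMM is installed and a default platform is available:
    # build the vacuum dimer, attach a Langevin integrator using the module-level
    # temperature / collision_rate / stable_timestep constants, and take a few steps.
    [system, coordinates] = WCADimerVacuum()
    integrator = openmm.LangevinIntegrator(temperature, collision_rate, stable_timestep)
    context = openmm.Context(system, integrator)
    context.setPositions(coordinates)
    integrator.step(100)
    state = context.getState(getEnergy=True)
    print "example dimer potential energy = %s" % str(state.getPotentialEnergy())
    del context, integrator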
| gpl-3.0 | -6,536,331,286,747,009,000 | 35.804795 | 122 | 0.576068 | false |
wolverineav/neutron | neutron/core_extensions/base.py | 32 | 1533 | # Copyright (c) 2015 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
NETWORK = 'network'
PORT = 'port'
CORE_RESOURCES = [NETWORK, PORT]
@six.add_metaclass(abc.ABCMeta)
class CoreResourceExtension(object):
@abc.abstractmethod
def process_fields(self, context, resource_type,
requested_resource, actual_resource):
"""Process extension fields.
:param context: neutron api request context
:param resource_type: core resource type (one of CORE_RESOURCES)
:param requested_resource: resource dict that contains extension fields
:param actual_resource: actual resource dict known to plugin
"""
@abc.abstractmethod
def extract_fields(self, resource_type, resource):
"""Extract extension fields.
:param resource_type: core resource type (one of CORE_RESOURCES)
:param resource: resource dict that contains extension fields
"""
| apache-2.0 | -6,785,748,535,176,739,000 | 30.9375 | 79 | 0.695369 | false |
40423105/2016fallcadp_hw | plugin/liquid_tags/graphviz.py | 245 | 3198 | """
GraphViz Tag
---------
This implements a Liquid-style graphviz tag for Pelican. You can use different
Graphviz programs like dot, neato, twopi etc. [1]
[1] http://www.graphviz.org/
Syntax
------
{% graphviz
<program> {
<DOT code>
}
%}
Examples
--------
{% graphviz
dot {
digraph graphname {
a -> b -> c;
b -> d;
}
}
%}
{% graphviz
twopi {
<code goes here>
}
%}
{% graphviz
neato {
<code goes here>
}
%}
...
Output
------
<div class="graphviz" style="text-align: center;"><img src="data:image/png;base64,_BASE64_IMAGE DATA_/></div>
"""
import base64
import re
from errno import EPIPE, EINVAL
from .mdx_liquid_tags import LiquidTags
SYNTAX = '{% graphviz [program] [dot code] %}'
DOT_BLOCK_RE = re.compile(r'^\s*(?P<program>\w+)\s*\{\s*(?P<code>.*\})\s*\}$', re.MULTILINE | re.DOTALL)
def run_graphviz(program, code, options=[], format='png'):
""" Runs graphviz programs and returns image data
Copied from https://github.com/tkf/ipython-hierarchymagic/blob/master/hierarchymagic.py
"""
import os
from subprocess import Popen, PIPE
dot_args = [program] + options + ['-T', format]
if os.name == 'nt':
# Avoid opening shell window.
# * https://github.com/tkf/ipython-hierarchymagic/issues/1
# * http://stackoverflow.com/a/2935727/727827
p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE, creationflags=0x08000000)
else:
p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE)
wentwrong = False
try:
# Graphviz may close standard input when an error occurs,
# resulting in a broken pipe on communicate()
stdout, stderr = p.communicate(code.encode('utf-8'))
except (OSError, IOError) as err:
if err.errno != EPIPE:
raise
wentwrong = True
except IOError as err:
if err.errno != EINVAL:
raise
wentwrong = True
if wentwrong:
# in this case, read the standard output and standard error streams
# directly, to get the error message(s)
stdout, stderr = p.stdout.read(), p.stderr.read()
p.wait()
if p.returncode != 0:
raise RuntimeError('dot exited with error:\n[stderr]\n{0}'.format(stderr.decode('utf-8')))
return stdout
@LiquidTags.register('graphviz')
def graphviz_parser(preprocessor, tag, markup):
""" Simple Graphviz parser """
# Parse the markup string
m = DOT_BLOCK_RE.search(markup)
if m:
# Get program and DOT code
code = m.group('code')
program = m.group('program').strip()
# Run specified program with our markup
output = run_graphviz(program, code)
# Return Base64 encoded image
return '<div class="graphviz" style="text-align: center;"><img src="data:image/png;base64,%s"></div>' % base64.b64encode(output)
else:
raise ValueError('Error processing input. '
'Expected syntax: {0}'.format(SYNTAX))
#----------------------------------------------------------------------
# This import allows image tag to be a Pelican plugin
from .liquid_tags import register
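# Example of the underlying helper (illustrative; not part of the original plugin). Given an
# installed Graphviz 'dot' binary, run_graphviz returns raw PNG bytes that graphviz_parser
# then base64-encodes into the <img> data URI:
#     png_data = run_graphviz('dot', 'digraph g { a -> b; }')
#     encoded = base64.b64encode(png_data)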
| agpl-3.0 | -6,084,093,312,393,851,000 | 23.984375 | 136 | 0.590369 | false |
Khilo84/PyQt4 | pyuic/uic/Loader/loader.py | 8 | 3049 | #############################################################################
##
## Copyright (C) 2011 Riverbank Computing Limited.
## Copyright (C) 2006 Thorsten Marek.
## All right reserved.
##
## This file is part of PyQt.
##
## You may use this file under the terms of the GPL v2 or the revised BSD
## license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of the Riverbank Computing Limited nor the names
## of its contributors may be used to endorse or promote products
## derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
#############################################################################
import os.path
from PyQt4 import QtGui, QtCore
from PyQt4.uic.uiparser import UIParser
from PyQt4.uic.Loader.qobjectcreator import LoaderCreatorPolicy
class DynamicUILoader(UIParser):
def __init__(self):
UIParser.__init__(self, QtCore, QtGui, LoaderCreatorPolicy())
def createToplevelWidget(self, classname, widgetname):
if self.toplevelInst is not None:
if not isinstance(self.toplevelInst, self.factory.findQObjectType(classname)):
raise TypeError(("Wrong base class of toplevel widget",
(type(self.toplevelInst), classname)))
return self.toplevelInst
else:
return self.factory.createQObject(classname, widgetname, ())
def loadUi(self, filename, toplevelInst=None):
self.toplevelInst = toplevelInst
if hasattr(filename, 'read'):
basedir = ''
else:
# Allow the filename to be a QString.
filename = str(filename)
basedir = os.path.dirname(filename)
return self.parse(filename, basedir)
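if __name__ == '__main__':
    # Usage sketch (not part of the original module): load a Qt Designer .ui file into a
    # live widget tree. 'dialog.ui' is an assumed example path; PyQt4 and a display are
    # required for this to actually run.
    import sys
    app = QtGui.QApplication(sys.argv)
    widget = DynamicUILoader().loadUi('dialog.ui')
    widget.show()
    sys.exit(app.exec_())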
| gpl-2.0 | -7,973,019,566,549,408,000 | 41.943662 | 90 | 0.673008 | false |
c86j224s/snippet | Python_Pygments/lib/python3.6/site-packages/pygments/lexers/ooc.py | 31 | 2999 | # -*- coding: utf-8 -*-
"""
pygments.lexers.ooc
~~~~~~~~~~~~~~~~~~~
Lexers for the Ooc language.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['OocLexer']
class OocLexer(RegexLexer):
"""
For `Ooc <http://ooc-lang.org/>`_ source code
.. versionadded:: 1.2
"""
name = 'Ooc'
aliases = ['ooc']
filenames = ['*.ooc']
mimetypes = ['text/x-ooc']
tokens = {
'root': [
(words((
'class', 'interface', 'implement', 'abstract', 'extends', 'from',
'this', 'super', 'new', 'const', 'final', 'static', 'import',
'use', 'extern', 'inline', 'proto', 'break', 'continue',
'fallthrough', 'operator', 'if', 'else', 'for', 'while', 'do',
'switch', 'case', 'as', 'in', 'version', 'return', 'true',
'false', 'null'), prefix=r'\b', suffix=r'\b'),
Keyword),
(r'include\b', Keyword, 'include'),
(r'(cover)([ \t]+)(from)([ \t]+)(\w+[*@]?)',
bygroups(Keyword, Text, Keyword, Text, Name.Class)),
(r'(func)((?:[ \t]|\\\n)+)(~[a-z_]\w*)',
bygroups(Keyword, Text, Name.Function)),
(r'\bfunc\b', Keyword),
# Note: %= and ^= not listed on http://ooc-lang.org/syntax
(r'//.*', Comment),
(r'(?s)/\*.*?\*/', Comment.Multiline),
(r'(==?|\+=?|-[=>]?|\*=?|/=?|:=|!=?|%=?|\?|>{1,3}=?|<{1,3}=?|\.\.|'
r'&&?|\|\|?|\^=?)', Operator),
(r'(\.)([ \t]*)([a-z]\w*)', bygroups(Operator, Text,
Name.Function)),
(r'[A-Z][A-Z0-9_]+', Name.Constant),
(r'[A-Z]\w*([@*]|\[[ \t]*\])?', Name.Class),
(r'([a-z]\w*(?:~[a-z]\w*)?)((?:[ \t]|\\\n)*)(?=\()',
bygroups(Name.Function, Text)),
(r'[a-z]\w*', Name.Variable),
# : introduces types
(r'[:(){}\[\];,]', Punctuation),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'0c[0-9]+', Number.Oct),
(r'0b[01]+', Number.Bin),
(r'[0-9_]\.[0-9_]*(?!\.)', Number.Float),
(r'[0-9_]+', Number.Decimal),
(r'"(?:\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\"])*"',
String.Double),
(r"'(?:\\.|\\[0-9]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
String.Char),
(r'@', Punctuation), # pointer dereference
(r'\.', Punctuation), # imports or chain operator
(r'\\[ \t\n]', Text),
(r'[ \t]+', Text),
],
'include': [
(r'[\w/]+', Name),
(r',', Punctuation),
(r'[ \t]', Text),
(r'[;\n]', Text, '#pop'),
],
}
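if __name__ == '__main__':
    # Quick self-check sketch (not part of the original module): highlight a tiny Ooc
    # snippet to the terminal using only public Pygments APIs.
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = 'main: func {\n    "hello world" println()\n}\n'
    print(highlight(sample, OocLexer(), TerminalFormatter()))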
| apache-2.0 | 2,338,783,156,743,905,300 | 34.282353 | 81 | 0.400467 | false |
mytliulei/DCNRobotInstallPackages | windows/win32/pygal-1.7.0/pygal/test/test_pie.py | 4 | 2015 | # -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2014 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
import os
import uuid
from pygal import Pie
def test_donut():
chart = Pie(inner_radius=.3, pretty_print=True)
chart.title = 'Browser usage in February 2012 (in %)'
chart.add('IE', 19.5)
chart.add('Firefox', 36.6)
chart.add('Chrome', 36.3)
chart.add('Safari', 4.5)
chart.add('Opera', 2.3)
assert chart.render()
def test_multiseries_donut():
# this just demos that the multiseries pie does not respect
# the inner_radius
chart = Pie(inner_radius=.3, pretty_print=True)
chart.title = 'Browser usage by version in February 2012 (in %)'
chart.add('IE', [5.7, 10.2, 2.6, 1])
chart.add('Firefox', [.6, 16.8, 7.4, 2.2, 1.2, 1, 1, 1.1, 4.3, 1])
chart.add('Chrome', [.3, .9, 17.1, 15.3, .6, .5, 1.6])
chart.add('Safari', [4.4, .1])
chart.add('Opera', [.1, 1.6, .1, .5])
assert chart.render()
def test_half_pie():
pie = Pie()
pie.add('IE', 19.5)
pie.add('Firefox', 36.6)
pie.add('Chrome', 36.3)
pie.add('Safari', 4.5)
pie.add('Opera', 2.3)
half = Pie(half_pie=True)
half.add('IE', 19.5)
half.add('Firefox', 36.6)
half.add('Chrome', 36.3)
half.add('Safari', 4.5)
half.add('Opera', 2.3)
assert pie.render() != half.render()
| apache-2.0 | -8,003,063,742,278,317,000 | 31.483871 | 79 | 0.644985 | false |
pbarton666/buzz_bot | djangoproj/djangoapp/csc/webapi/urls.py | 1 | 2618 | from django.conf.urls.defaults import *
from piston.resource import Resource
from csc.webapi.docs import documentation_view
from csc.webapi.handlers import *
# This gives a way to accept "query.foo" on the end of the URL to set the
# format to 'foo'. "?format=foo" works as well.
Q = r'(query\.(?P<emitter_format>.+))?$'
urlpatterns = patterns('',
url(r'^(?P<lang>.+)/concept/(?P<concept>[^/]*)/'+Q,
Resource(ConceptHandler), name='concept_handler'),
url(r'^(?P<lang>.+)/concept/(?P<concept>[^/]*)/assertions/'+Q,
Resource(ConceptAssertionHandler), name='concept_assertion_handler_default'),
url(r'^(?P<lang>.+)/concept/(?P<concept>[^/]*)/assertions/limit:(?P<limit>[0-9]+)/'+Q,
Resource(ConceptAssertionHandler), name='concept_assertion_handler'),
url(r'^(?P<lang>.+)/concept/(?P<concept>[^/]*)/surfaceforms/'+Q,
Resource(ConceptSurfaceHandler), name='concept_surface_handler_default'),
url(r'^(?P<lang>.+)/concept/(?P<concept>[^/]*)/surfaceforms/limit:(?P<limit>[0-9]+)/'+Q,
Resource(ConceptSurfaceHandler), name='concept_surface_handler'),
url(r'^(?P<lang>.+)/(?P<dir>left|right)feature/(?P<relation>[^/]+)/(?P<concept>[^/]+)/'+Q,
Resource(FeatureQueryHandler), name='feature_query_handler_default'),
url(r'^(?P<lang>.+)/(?P<dir>left|right)feature/(?P<relation>[^/]+)/(?P<concept>[^/]+)/limit:(?P<limit>[0-9]+)/'+Q,
Resource(FeatureQueryHandler), name='feature_query_handler'),
url(r'^(?P<lang>.+)/(?P<type>.+)/(?P<id>[0-9]+)/votes/'+Q,
Resource(RatedObjectHandler), name='rated_object_handler'),
url(r'^(?P<lang>.+)/surface/(?P<text>.+)/'+Q,
Resource(SurfaceFormHandler), name='surface_form_handler'),
url(r'^(?P<lang>.+)/frame/(?P<id>[0-9]+)/'+Q,
Resource(FrameHandler), name='frame_handler'),
url(r'^(?P<lang>.+)/frame/(?P<id>[0-9]+)/statements/'+Q,
Resource(RawAssertionByFrameHandler),
name='raw_assertion_by_frame_handler_default'),
url(r'^(?P<lang>.+)/frame/(?P<id>[0-9]+)/statements/limit:(?P<limit>[0-9]+)/'+Q,
Resource(RawAssertionByFrameHandler),
name='raw_assertion_by_frame_handler'),
url(r'^(?P<lang>.+)/assertion/(?P<id>[0-9]+)/'+Q,
Resource(AssertionHandler), name='assertion_handler'),
url(r'^(?P<lang>.+)/raw_assertion/(?P<id>[0-9]+)/'+Q,
Resource(RawAssertionHandler), name='raw_assertion_handler'),
url(r'^(?P<lang>.+)/frequency/(?P<text>[^/]*)/'+Q,
Resource(FrequencyHandler), name='frequency_handler'),
url(r'docs.txt$',
documentation_view, name='documentation_view')
)
# :vim:tw=0:
| mit | 3,591,715,415,787,534,300 | 55.913043 | 118 | 0.617647 | false |
s0930342674/pyload | module/plugins/container/RSDF.py | 15 | 1699 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import binascii
import re
from Crypto.Cipher import AES
from module.plugins.internal.Container import Container
from module.utils import fs_encode
class RSDF(Container):
__name__ = "RSDF"
__type__ = "container"
__version__ = "0.31"
__status__ = "testing"
__pattern__ = r'.+\.rsdf$'
__description__ = """RSDF container decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("RaNaN", "[email protected]"),
("spoob", "[email protected]"),
("Walter Purcaro", "[email protected]")]
KEY = "8C35192D964DC3182C6F84F3252239EB4A320D2500000000"
IV = "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"
def decrypt(self, pyfile):
KEY = binascii.unhexlify(self.KEY)
IV = binascii.unhexlify(self.IV)
iv = AES.new(KEY, AES.MODE_ECB).encrypt(IV)
cipher = AES.new(KEY, AES.MODE_CFB, iv)
try:
fs_filename = fs_encode(pyfile.url.strip())
with open(fs_filename, 'r') as rsdf:
data = rsdf.read()
except IOError, e:
self.fail(e)
if re.search(r"<title>404 - Not Found</title>", data):
pyfile.setStatus("offline")
else:
try:
raw_links = binascii.unhexlify(''.join(data.split())).splitlines()
except TypeError:
self.fail(_("Container is corrupted"))
for link in raw_links:
if not link:
continue
link = cipher.decrypt(link.decode('base64')).replace('CCF: ', '')
self.urls.append(link)
| gpl-3.0 | 2,880,596,217,975,472,600 | 26.403226 | 82 | 0.542672 | false |
takis/django | django/contrib/gis/geoip2/base.py | 335 | 9054 | import os
import socket
import geoip2.database
from django.conf import settings
from django.core.validators import ipv4_re
from django.utils import six
from django.utils.ipv6 import is_valid_ipv6_address
from .resources import City, Country
# Creating the settings dictionary with any settings, if needed.
GEOIP_SETTINGS = {
'GEOIP_PATH': getattr(settings, 'GEOIP_PATH', None),
'GEOIP_CITY': getattr(settings, 'GEOIP_CITY', 'GeoLite2-City.mmdb'),
'GEOIP_COUNTRY': getattr(settings, 'GEOIP_COUNTRY', 'GeoLite2-Country.mmdb'),
}
class GeoIP2Exception(Exception):
pass
class GeoIP2(object):
# The flags for GeoIP memory caching.
# Try MODE_MMAP_EXT, MODE_MMAP, MODE_FILE in that order.
MODE_AUTO = 0
# Use the C extension with memory map.
MODE_MMAP_EXT = 1
# Read from memory map. Pure Python.
MODE_MMAP = 2
# Read database as standard file. Pure Python.
MODE_FILE = 4
# Load database into memory. Pure Python.
MODE_MEMORY = 8
cache_options = {opt: None for opt in (0, 1, 2, 4, 8)}
# Paths to the city & country binary databases.
_city_file = ''
_country_file = ''
# Initially, pointers to GeoIP file references are NULL.
_city = None
_country = None
def __init__(self, path=None, cache=0, country=None, city=None):
"""
Initialize the GeoIP object. No parameters are required to use default
settings. Keyword arguments may be passed in to customize the locations
of the GeoIP datasets.
* path: Base directory to where GeoIP data is located or the full path
to where the city or country data files (*.mmdb) are located.
Assumes that both the city and country data sets are located in
this directory; overrides the GEOIP_PATH setting.
* cache: The cache settings when opening up the GeoIP datasets. May be
an integer in (0, 1, 2, 4, 8) corresponding to the MODE_AUTO,
MODE_MMAP_EXT, MODE_MMAP, MODE_FILE, and MODE_MEMORY,
`GeoIPOptions` C API settings, respectively. Defaults to 0,
meaning MODE_AUTO.
* country: The name of the GeoIP country data file. Defaults to
'GeoLite2-Country.mmdb'; overrides the GEOIP_COUNTRY setting.
* city: The name of the GeoIP city data file. Defaults to
'GeoLite2-City.mmdb'; overrides the GEOIP_CITY setting.
"""
# Checking the given cache option.
if cache in self.cache_options:
self._cache = cache
else:
raise GeoIP2Exception('Invalid GeoIP caching option: %s' % cache)
# Getting the GeoIP data path.
if not path:
path = GEOIP_SETTINGS['GEOIP_PATH']
if not path:
raise GeoIP2Exception('GeoIP path must be provided via parameter or the GEOIP_PATH setting.')
if not isinstance(path, six.string_types):
raise TypeError('Invalid path type: %s' % type(path).__name__)
if os.path.isdir(path):
# Constructing the GeoIP database filenames using the settings
# dictionary. If the database files for the GeoLite country
# and/or city datasets exist, then try to open them.
country_db = os.path.join(path, country or GEOIP_SETTINGS['GEOIP_COUNTRY'])
if os.path.isfile(country_db):
self._country = geoip2.database.Reader(country_db, mode=cache)
self._country_file = country_db
city_db = os.path.join(path, city or GEOIP_SETTINGS['GEOIP_CITY'])
if os.path.isfile(city_db):
self._city = geoip2.database.Reader(city_db, mode=cache)
self._city_file = city_db
elif os.path.isfile(path):
# Otherwise, some detective work will be needed to figure out
# whether the given database path is for the GeoIP country or city
# databases.
reader = geoip2.database.Reader(path, mode=cache)
db_type = reader.metadata().database_type
if db_type.endswith('City'):
# GeoLite City database detected.
self._city = reader
self._city_file = path
elif db_type.endswith('Country'):
# GeoIP Country database detected.
self._country = reader
self._country_file = path
else:
raise GeoIP2Exception('Unable to recognize database edition: %s' % db_type)
else:
raise GeoIP2Exception('GeoIP path must be a valid file or directory.')
@property
def _reader(self):
if self._country:
return self._country
else:
return self._city
@property
def _country_or_city(self):
if self._country:
return self._country.country
else:
return self._city.city
def __del__(self):
# Cleanup any GeoIP file handles lying around.
if self._reader:
self._reader.close()
def __repr__(self):
meta = self._reader.metadata()
version = '[v%s.%s]' % (meta.binary_format_major_version, meta.binary_format_minor_version)
return '<%(cls)s %(version)s _country_file="%(country)s", _city_file="%(city)s">' % {
'cls': self.__class__.__name__,
'version': version,
'country': self._country_file,
'city': self._city_file,
}
def _check_query(self, query, country=False, city=False, city_or_country=False):
"Helper routine for checking the query and database availability."
# Making sure a string was passed in for the query.
if not isinstance(query, six.string_types):
raise TypeError('GeoIP query must be a string, not type %s' % type(query).__name__)
# Extra checks for the existence of country and city databases.
if city_or_country and not (self._country or self._city):
raise GeoIP2Exception('Invalid GeoIP country and city data files.')
elif country and not self._country:
raise GeoIP2Exception('Invalid GeoIP country data file: %s' % self._country_file)
elif city and not self._city:
raise GeoIP2Exception('Invalid GeoIP city data file: %s' % self._city_file)
# Return the query string back to the caller. GeoIP2 only takes IP addresses.
if not (ipv4_re.match(query) or is_valid_ipv6_address(query)):
query = socket.gethostbyname(query)
return query
def city(self, query):
"""
Return a dictionary of city information for the given IP address or
Fully Qualified Domain Name (FQDN). Some information in the dictionary
may be undefined (None).
"""
enc_query = self._check_query(query, city=True)
return City(self._city.city(enc_query))
def country_code(self, query):
"Return the country code for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
return self.country(enc_query)['country_code']
def country_name(self, query):
"Return the country name for the given IP Address or FQDN."
enc_query = self._check_query(query, city_or_country=True)
return self.country(enc_query)['country_name']
def country(self, query):
"""
Return a dictionary with the country code and name when given an
IP address or a Fully Qualified Domain Name (FQDN). For example, both
'24.124.1.80' and 'djangoproject.com' are valid parameters.
"""
# Returning the country code and name
enc_query = self._check_query(query, city_or_country=True)
return Country(self._country_or_city(enc_query))
# #### Coordinate retrieval routines ####
def coords(self, query, ordering=('longitude', 'latitude')):
cdict = self.city(query)
if cdict is None:
return None
else:
return tuple(cdict[o] for o in ordering)
def lon_lat(self, query):
"Return a tuple of the (longitude, latitude) for the given query."
return self.coords(query)
def lat_lon(self, query):
"Return a tuple of the (latitude, longitude) for the given query."
return self.coords(query, ('latitude', 'longitude'))
def geos(self, query):
"Return a GEOS Point object for the given query."
ll = self.lon_lat(query)
if ll:
from django.contrib.gis.geos import Point
return Point(ll, srid=4326)
else:
return None
# #### GeoIP Database Information Routines ####
@property
def info(self):
"Return information about the GeoIP library and databases in use."
meta = self._reader.metadata()
return 'GeoIP Library:\n\t%s.%s\n' % (meta.binary_format_major_version, meta.binary_format_minor_version)
@classmethod
def open(cls, full_path, cache):
return GeoIP2(full_path, cache)
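if __name__ == '__main__':
    # Usage sketch (not part of the original module): requires Django settings to be
    # configured with GEOIP_PATH pointing at the GeoLite2 .mmdb files; the IP below is
    # only an example value.
    g = GeoIP2()
    print(g.country('8.8.8.8'))
    print(g.lat_lon('8.8.8.8'))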
| bsd-3-clause | -2,319,498,031,628,464,000 | 38.537118 | 113 | 0.617186 | false |
yawnosnorous/python-for-android | python-modules/twisted/twisted/web/test/test_error.py | 52 | 5433 | # Copyright (c) 2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
HTTP errors.
"""
from twisted.trial import unittest
from twisted.web import error
class ErrorTestCase(unittest.TestCase):
"""
Tests for how L{Error} attributes are initialized.
"""
def test_noMessageValidStatus(self):
"""
If no C{message} argument is passed to the L{Error} constructor and the
C{code} argument is a valid HTTP status code, C{code} is mapped to a
descriptive string to which C{message} is assigned.
"""
e = error.Error("200")
self.assertEquals(e.message, "OK")
def test_noMessageInvalidStatus(self):
"""
If no C{message} argument is passed to the L{Error} constructor and
C{code} isn't a valid HTTP status code, C{message} stays C{None}.
"""
e = error.Error("InvalidCode")
self.assertEquals(e.message, None)
def test_messageExists(self):
"""
If a C{message} argument is passed to the L{Error} constructor, the
C{message} isn't affected by the value of C{status}.
"""
e = error.Error("200", "My own message")
self.assertEquals(e.message, "My own message")
class PageRedirectTestCase(unittest.TestCase):
"""
Tests for how L{PageRedirect} attributes are initialized.
"""
def test_noMessageValidStatus(self):
"""
If no C{message} argument is passed to the L{PageRedirect} constructor
and the C{code} argument is a valid HTTP status code, C{code} is mapped
to a descriptive string to which C{message} is assigned.
"""
e = error.PageRedirect("200", location="/foo")
self.assertEquals(e.message, "OK to /foo")
def test_noMessageValidStatusNoLocation(self):
"""
If no C{message} argument is passed to the L{PageRedirect} constructor
and C{location} is also empty and the C{code} argument is a valid HTTP
status code, C{code} is mapped to a descriptive string to which
C{message} is assigned without trying to include an empty location.
"""
e = error.PageRedirect("200")
self.assertEquals(e.message, "OK")
def test_noMessageInvalidStatusLocationExists(self):
"""
If no C{message} argument is passed to the L{PageRedirect} constructor
and C{code} isn't a valid HTTP status code, C{message} stays C{None}.
"""
e = error.PageRedirect("InvalidCode", location="/foo")
self.assertEquals(e.message, None)
def test_messageExistsLocationExists(self):
"""
If a C{message} argument is passed to the L{PageRedirect} constructor,
the C{message} isn't affected by the value of C{status}.
"""
e = error.PageRedirect("200", "My own message", location="/foo")
self.assertEquals(e.message, "My own message to /foo")
def test_messageExistsNoLocation(self):
"""
If a C{message} argument is passed to the L{PageRedirect} constructor
and no location is provided, C{message} doesn't try to include the empty
location.
"""
e = error.PageRedirect("200", "My own message")
self.assertEquals(e.message, "My own message")
class InfiniteRedirectionTestCase(unittest.TestCase):
"""
Tests for how L{InfiniteRedirection} attributes are initialized.
"""
def test_noMessageValidStatus(self):
"""
If no C{message} argument is passed to the L{InfiniteRedirection}
constructor and the C{code} argument is a valid HTTP status code,
C{code} is mapped to a descriptive string to which C{message} is
assigned.
"""
e = error.InfiniteRedirection("200", location="/foo")
self.assertEquals(e.message, "OK to /foo")
def test_noMessageValidStatusNoLocation(self):
"""
If no C{message} argument is passed to the L{InfiniteRedirection}
constructor and C{location} is also empty and the C{code} argument is a
valid HTTP status code, C{code} is mapped to a descriptive string to
which C{message} is assigned without trying to include an empty
location.
"""
e = error.InfiniteRedirection("200")
self.assertEquals(e.message, "OK")
def test_noMessageInvalidStatusLocationExists(self):
"""
If no C{message} argument is passed to the L{InfiniteRedirection}
constructor and C{code} isn't a valid HTTP status code, C{message} stays
C{None}.
"""
e = error.InfiniteRedirection("InvalidCode", location="/foo")
self.assertEquals(e.message, None)
def test_messageExistsLocationExists(self):
"""
If a C{message} argument is passed to the L{InfiniteRedirection}
constructor, the C{message} isn't affected by the value of C{status}.
"""
e = error.InfiniteRedirection("200", "My own message", location="/foo")
self.assertEquals(e.message, "My own message to /foo")
def test_messageExistsNoLocation(self):
"""
If a C{message} argument is passed to the L{InfiniteRedirection}
constructor and no location is provided, C{message} doesn't try to
include the empty location.
"""
e = error.InfiniteRedirection("200", "My own message")
self.assertEquals(e.message, "My own message")
| apache-2.0 | -8,810,419,224,234,900,000 | 34.980132 | 80 | 0.641082 | false |
Plexxi/st2 | st2common/tests/unit/services/test_workflow.py | 3 | 18346 | # Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import copy
import mock
from orquesta import exceptions as orquesta_exc
from orquesta.specs import loader as specs_loader
from orquesta import statuses as wf_statuses
import st2tests
import st2tests.config as tests_config
tests_config.parse_args()
from st2common.bootstrap import actionsregistrar
from st2common.bootstrap import runnersregistrar
from st2common.exceptions import action as action_exc
from st2common.models.db import liveaction as lv_db_models
from st2common.models.db import execution as ex_db_models
from st2common.models.db import pack as pk_db_models
from st2common.persistence import execution as ex_db_access
from st2common.persistence import pack as pk_db_access
from st2common.persistence import workflow as wf_db_access
from st2common.services import action as action_service
from st2common.services import workflows as workflow_service
from st2common.transport import liveaction as lv_ac_xport
from st2common.transport import publishers
from st2tests.mocks import liveaction as mock_lv_ac_xport
TEST_PACK = "orquesta_tests"
TEST_PACK_PATH = (
st2tests.fixturesloader.get_fixtures_packs_base_path() + "/" + TEST_PACK
)
PACK_7 = "dummy_pack_7"
PACK_7_PATH = st2tests.fixturesloader.get_fixtures_packs_base_path() + "/" + PACK_7
PACKS = [
TEST_PACK_PATH,
PACK_7_PATH,
st2tests.fixturesloader.get_fixtures_packs_base_path() + "/core",
]
@mock.patch.object(
publishers.CUDPublisher, "publish_update", mock.MagicMock(return_value=None)
)
@mock.patch.object(
publishers.CUDPublisher,
"publish_create",
mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_create),
)
@mock.patch.object(
lv_ac_xport.LiveActionPublisher,
"publish_state",
mock.MagicMock(side_effect=mock_lv_ac_xport.MockLiveActionPublisher.publish_state),
)
class WorkflowExecutionServiceTest(st2tests.WorkflowTestCase):
@classmethod
def setUpClass(cls):
super(WorkflowExecutionServiceTest, cls).setUpClass()
# Register runners.
runnersregistrar.register_runners()
# Register test pack(s).
actions_registrar = actionsregistrar.ActionsRegistrar(
use_pack_cache=False, fail_on_failure=True
)
for pack in PACKS:
actions_registrar.register_from_pack(pack)
def test_request(self):
wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")
# Manually create the liveaction and action execution objects without publishing.
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)
# Request the workflow execution.
wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
st2_ctx = self.mock_st2_context(ac_ex_db)
wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)
# Check workflow execution is saved to the database.
wf_ex_dbs = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)
self.assertEqual(len(wf_ex_dbs), 1)
# Check required attributes.
wf_ex_db = wf_ex_dbs[0]
self.assertIsNotNone(wf_ex_db.id)
self.assertGreater(wf_ex_db.rev, 0)
self.assertEqual(wf_ex_db.action_execution, str(ac_ex_db.id))
self.assertEqual(wf_ex_db.status, wf_statuses.REQUESTED)
def test_request_with_input(self):
wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")
# Manually create the liveaction and action execution objects without publishing.
lv_ac_db = lv_db_models.LiveActionDB(
action=wf_meta["name"], parameters={"who": "stan"}
)
lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)
# Request the workflow execution.
wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
st2_ctx = self.mock_st2_context(ac_ex_db)
wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)
# Check workflow execution is saved to the database.
wf_ex_dbs = wf_db_access.WorkflowExecution.query(
action_execution=str(ac_ex_db.id)
)
self.assertEqual(len(wf_ex_dbs), 1)
# Check required attributes.
wf_ex_db = wf_ex_dbs[0]
self.assertIsNotNone(wf_ex_db.id)
self.assertGreater(wf_ex_db.rev, 0)
self.assertEqual(wf_ex_db.action_execution, str(ac_ex_db.id))
self.assertEqual(wf_ex_db.status, wf_statuses.REQUESTED)
# Check input and context.
expected_input = {"who": "stan"}
self.assertDictEqual(wf_ex_db.input, expected_input)
def test_request_bad_action(self):
wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")
# Manually create the action execution object with the bad action.
ac_ex_db = ex_db_models.ActionExecutionDB(
action={"ref": "mock.foobar"}, runner={"name": "foobar"}
)
# Request the workflow execution.
self.assertRaises(
action_exc.InvalidActionReferencedException,
workflow_service.request,
self.get_wf_def(TEST_PACK_PATH, wf_meta),
ac_ex_db,
self.mock_st2_context(ac_ex_db),
)
def test_request_wf_def_with_bad_action_ref(self):
wf_meta = self.get_wf_fixture_meta_data(
TEST_PACK_PATH, "fail-inspection-action-ref.yaml"
)
# Manually create the liveaction and action execution objects without publishing.
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)
# Exception is expected on request of workflow execution.
self.assertRaises(
orquesta_exc.WorkflowInspectionError,
workflow_service.request,
self.get_wf_def(TEST_PACK_PATH, wf_meta),
ac_ex_db,
self.mock_st2_context(ac_ex_db),
)
def test_request_wf_def_with_unregistered_action(self):
wf_meta = self.get_wf_fixture_meta_data(
TEST_PACK_PATH, "fail-inspection-action-db.yaml"
)
# Manually create the liveaction and action execution objects without publishing.
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)
# Exception is expected on request of workflow execution.
self.assertRaises(
orquesta_exc.WorkflowInspectionError,
workflow_service.request,
self.get_wf_def(TEST_PACK_PATH, wf_meta),
ac_ex_db,
self.mock_st2_context(ac_ex_db),
)
def test_request_wf_def_with_missing_required_action_param(self):
wf_name = "fail-inspection-missing-required-action-param"
wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, wf_name + ".yaml")
# Manually create the liveaction and action execution objects without publishing.
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)
# Exception is expected on request of workflow execution.
self.assertRaises(
orquesta_exc.WorkflowInspectionError,
workflow_service.request,
self.get_wf_def(TEST_PACK_PATH, wf_meta),
ac_ex_db,
self.mock_st2_context(ac_ex_db),
)
def test_request_wf_def_with_unexpected_action_param(self):
wf_name = "fail-inspection-unexpected-action-param"
wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, wf_name + ".yaml")
# Manually create the liveaction and action execution objects without publishing.
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)
# Exception is expected on request of workflow execution.
self.assertRaises(
orquesta_exc.WorkflowInspectionError,
workflow_service.request,
self.get_wf_def(TEST_PACK_PATH, wf_meta),
ac_ex_db,
self.mock_st2_context(ac_ex_db),
)
def test_request_task_execution(self):
wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")
# Manually create the liveaction and action execution objects without publishing.
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)
# Request the workflow execution.
wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
st2_ctx = self.mock_st2_context(ac_ex_db)
wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)
spec_module = specs_loader.get_spec_module(wf_ex_db.spec["catalog"])
wf_spec = spec_module.WorkflowSpec.deserialize(wf_ex_db.spec)
# Manually request task execution.
task_route = 0
task_id = "task1"
task_spec = wf_spec.tasks.get_task(task_id)
task_ctx = {"foo": "bar"}
st2_ctx = {"execution_id": wf_ex_db.action_execution}
task_ex_req = {
"id": task_id,
"route": task_route,
"spec": task_spec,
"ctx": task_ctx,
"actions": [
{"action": "core.echo", "input": {"message": "Veni, vidi, vici."}}
],
}
workflow_service.request_task_execution(wf_ex_db, st2_ctx, task_ex_req)
# Check task execution is saved to the database.
task_ex_dbs = wf_db_access.TaskExecution.query(
workflow_execution=str(wf_ex_db.id)
)
self.assertEqual(len(task_ex_dbs), 1)
# Check required attributes.
task_ex_db = task_ex_dbs[0]
self.assertIsNotNone(task_ex_db.id)
self.assertGreater(task_ex_db.rev, 0)
self.assertEqual(task_ex_db.workflow_execution, str(wf_ex_db.id))
self.assertEqual(task_ex_db.status, wf_statuses.RUNNING)
# Check action execution for the task query with task execution ID.
ac_ex_dbs = ex_db_access.ActionExecution.query(
task_execution=str(task_ex_db.id)
)
self.assertEqual(len(ac_ex_dbs), 1)
# Check action execution for the task query with workflow execution ID.
ac_ex_dbs = ex_db_access.ActionExecution.query(
workflow_execution=str(wf_ex_db.id)
)
self.assertEqual(len(ac_ex_dbs), 1)
def test_request_task_execution_bad_action(self):
wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")
# Manually create the liveaction and action execution objects without publishing.
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)
# Request the workflow execution.
wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
st2_ctx = self.mock_st2_context(ac_ex_db)
wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)
spec_module = specs_loader.get_spec_module(wf_ex_db.spec["catalog"])
wf_spec = spec_module.WorkflowSpec.deserialize(wf_ex_db.spec)
# Manually request task execution.
task_route = 0
task_id = "task1"
task_spec = wf_spec.tasks.get_task(task_id)
task_ctx = {"foo": "bar"}
st2_ctx = {"execution_id": wf_ex_db.action_execution}
task_ex_req = {
"id": task_id,
"route": task_route,
"spec": task_spec,
"ctx": task_ctx,
"actions": [
{"action": "mock.echo", "input": {"message": "Veni, vidi, vici."}}
],
}
self.assertRaises(
action_exc.InvalidActionReferencedException,
workflow_service.request_task_execution,
wf_ex_db,
st2_ctx,
task_ex_req,
)
def test_handle_action_execution_completion(self):
wf_meta = self.get_wf_fixture_meta_data(TEST_PACK_PATH, "sequential.yaml")
# Manually create the liveaction and action execution objects without publishing.
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)
# Request and pre-process the workflow execution.
wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
st2_ctx = self.mock_st2_context(ac_ex_db)
wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)
wf_ex_db = self.prep_wf_ex(wf_ex_db)
# Manually request task execution.
self.run_workflow_step(wf_ex_db, "task1", 0, ctx={"foo": "bar"})
# Check that a new task is executed.
self.assert_task_running("task2", 0)
def test_evaluate_action_execution_delay(self):
base_task_ex_req = {"task_id": "task1", "task_name": "task1", "route": 0}
# No task delay.
task_ex_req = copy.deepcopy(base_task_ex_req)
ac_ex_req = {"action": "core.noop", "input": None}
actual_delay = workflow_service.eval_action_execution_delay(
task_ex_req, ac_ex_req
)
self.assertIsNone(actual_delay)
# Simple task delay.
task_ex_req = copy.deepcopy(base_task_ex_req)
task_ex_req["delay"] = 180
ac_ex_req = {"action": "core.noop", "input": None}
actual_delay = workflow_service.eval_action_execution_delay(
task_ex_req, ac_ex_req
)
self.assertEqual(actual_delay, 180)
        # Task delay for a with-items task with no concurrency.
task_ex_req = copy.deepcopy(base_task_ex_req)
task_ex_req["delay"] = 180
task_ex_req["concurrency"] = None
ac_ex_req = {"action": "core.noop", "input": None, "items_id": 0}
actual_delay = workflow_service.eval_action_execution_delay(
task_ex_req, ac_ex_req, True
)
self.assertEqual(actual_delay, 180)
        # Task delay for a with-items task with concurrency, evaluating the first item.
task_ex_req = copy.deepcopy(base_task_ex_req)
task_ex_req["delay"] = 180
task_ex_req["concurrency"] = 1
ac_ex_req = {"action": "core.noop", "input": None, "item_id": 0}
actual_delay = workflow_service.eval_action_execution_delay(
task_ex_req, ac_ex_req, True
)
self.assertEqual(actual_delay, 180)
        # Task delay for a with-items task with concurrency, evaluating later items.
task_ex_req = copy.deepcopy(base_task_ex_req)
task_ex_req["delay"] = 180
task_ex_req["concurrency"] = 1
ac_ex_req = {"action": "core.noop", "input": None, "item_id": 1}
actual_delay = workflow_service.eval_action_execution_delay(
task_ex_req, ac_ex_req, True
)
self.assertIsNone(actual_delay)
def test_request_action_execution_render(self):
# Manually create ConfigDB
output = "Testing"
value = {"config_item_one": output}
config_db = pk_db_models.ConfigDB(pack=PACK_7, values=value)
config = pk_db_access.Config.add_or_update(config_db)
self.assertEqual(len(config), 3)
wf_meta = self.get_wf_fixture_meta_data(
TEST_PACK_PATH, "render_config_context.yaml"
)
# Manually create the liveaction and action execution objects without publishing.
lv_ac_db = lv_db_models.LiveActionDB(action=wf_meta["name"])
lv_ac_db, ac_ex_db = action_service.create_request(lv_ac_db)
# Request the workflow execution.
wf_def = self.get_wf_def(TEST_PACK_PATH, wf_meta)
st2_ctx = self.mock_st2_context(ac_ex_db)
wf_ex_db = workflow_service.request(wf_def, ac_ex_db, st2_ctx)
spec_module = specs_loader.get_spec_module(wf_ex_db.spec["catalog"])
wf_spec = spec_module.WorkflowSpec.deserialize(wf_ex_db.spec)
# Pass down appropriate st2 context to the task and action execution(s).
root_st2_ctx = wf_ex_db.context.get("st2", {})
st2_ctx = {
"execution_id": wf_ex_db.action_execution,
"user": root_st2_ctx.get("user"),
"pack": root_st2_ctx.get("pack"),
}
# Manually request task execution.
task_route = 0
task_id = "task1"
task_spec = wf_spec.tasks.get_task(task_id)
task_ctx = {"foo": "bar"}
task_ex_req = {
"id": task_id,
"route": task_route,
"spec": task_spec,
"ctx": task_ctx,
"actions": [
{"action": "dummy_pack_7.render_config_context", "input": None}
],
}
workflow_service.request_task_execution(wf_ex_db, st2_ctx, task_ex_req)
# Check task execution is saved to the database.
task_ex_dbs = wf_db_access.TaskExecution.query(
workflow_execution=str(wf_ex_db.id)
)
self.assertEqual(len(task_ex_dbs), 1)
workflow_service.request_task_execution(wf_ex_db, st2_ctx, task_ex_req)
# Manually request action execution
task_ex_db = task_ex_dbs[0]
action_ex_db = workflow_service.request_action_execution(
wf_ex_db, task_ex_db, st2_ctx, task_ex_req["actions"][0]
)
# Check required attributes.
self.assertIsNotNone(str(action_ex_db.id))
self.assertEqual(task_ex_db.workflow_execution, str(wf_ex_db.id))
expected_parameters = {"value1": output}
self.assertEqual(expected_parameters, action_ex_db.parameters)
| apache-2.0 | 8,585,207,601,729,925,000 | 38.369099 | 89 | 0.630819 | false |
draugiskisprendimai/odoo | openerp/addons/base/tests/test_ir_attachment.py | 433 | 3536 | import hashlib
import os
import openerp
import openerp.tests.common
HASH_SPLIT = 2 # FIXME: testing implementation details is not a good idea
class test_ir_attachment(openerp.tests.common.TransactionCase):
def setUp(self):
super(test_ir_attachment, self).setUp()
registry, cr, uid = self.registry, self.cr, self.uid
self.ira = registry('ir.attachment')
self.filestore = self.ira._filestore(cr, uid)
# Blob1
self.blob1 = 'blob1'
self.blob1_b64 = self.blob1.encode('base64')
blob1_hash = hashlib.sha1(self.blob1).hexdigest()
self.blob1_fname = blob1_hash[:HASH_SPLIT] + '/' + blob1_hash
# Blob2
blob2 = 'blob2'
self.blob2_b64 = blob2.encode('base64')
def test_01_store_in_db(self):
registry, cr, uid = self.registry, self.cr, self.uid
# force storing in database
registry('ir.config_parameter').set_param(cr, uid, 'ir_attachment.location', 'db')
# 'ir_attachment.location' is undefined test database storage
a1 = self.ira.create(cr, uid, {'name': 'a1', 'datas': self.blob1_b64})
a1_read = self.ira.read(cr, uid, [a1], ['datas'])
self.assertEqual(a1_read[0]['datas'], self.blob1_b64)
a1_db_datas = self.ira.browse(cr, uid, a1).db_datas
self.assertEqual(a1_db_datas, self.blob1_b64)
def test_02_store_on_disk(self):
registry, cr, uid = self.registry, self.cr, self.uid
a2 = self.ira.create(cr, uid, {'name': 'a2', 'datas': self.blob1_b64})
a2_store_fname = self.ira.browse(cr, uid, a2).store_fname
self.assertEqual(a2_store_fname, self.blob1_fname)
self.assertTrue(os.path.isfile(os.path.join(self.filestore, a2_store_fname)))
def test_03_no_duplication(self):
registry, cr, uid = self.registry, self.cr, self.uid
a2 = self.ira.create(cr, uid, {'name': 'a2', 'datas': self.blob1_b64})
a2_store_fname = self.ira.browse(cr, uid, a2).store_fname
a3 = self.ira.create(cr, uid, {'name': 'a3', 'datas': self.blob1_b64})
a3_store_fname = self.ira.browse(cr, uid, a3).store_fname
self.assertEqual(a3_store_fname, a2_store_fname)
def test_04_keep_file(self):
registry, cr, uid = self.registry, self.cr, self.uid
a2 = self.ira.create(cr, uid, {'name': 'a2', 'datas': self.blob1_b64})
a3 = self.ira.create(cr, uid, {'name': 'a3', 'datas': self.blob1_b64})
a2_store_fname = self.ira.browse(cr, uid, a2).store_fname
a2_fn = os.path.join(self.filestore, a2_store_fname)
self.ira.unlink(cr, uid, [a3])
self.assertTrue(os.path.isfile(a2_fn))
        # once a2 is deleted as well, the underlying file is unlinked
self.ira.unlink(cr, uid, [a2])
self.assertFalse(os.path.isfile(a2_fn))
def test_05_change_data_change_file(self):
registry, cr, uid = self.registry, self.cr, self.uid
a2 = self.ira.create(cr, uid, {'name': 'a2', 'datas': self.blob1_b64})
a2_store_fname = self.ira.browse(cr, uid, a2).store_fname
a2_fn = os.path.join(self.filestore, a2_store_fname)
self.assertTrue(os.path.isfile(a2_fn))
self.ira.write(cr, uid, [a2], {'datas': self.blob2_b64})
self.assertFalse(os.path.isfile(a2_fn))
new_a2_store_fname = self.ira.browse(cr, uid, a2).store_fname
self.assertNotEqual(a2_store_fname, new_a2_store_fname)
new_a2_fn = os.path.join(self.filestore, new_a2_store_fname)
self.assertTrue(os.path.isfile(new_a2_fn))
| agpl-3.0 | 6,441,844,423,614,154,000 | 37.434783 | 90 | 0.619061 | false |
zoidbergwill/lint-review | lintreview/config.py | 2 | 5210 | import os
import logging.config
from flask.config import Config
from ConfigParser import ConfigParser
from StringIO import StringIO
def load_config():
"""
Loads the config files merging the defaults
with the file defined in environ.LINTREVIEW_SETTINGS if it exists.
"""
config = Config(os.getcwd())
if 'LINTREVIEW_SETTINGS' in os.environ:
config.from_envvar('LINTREVIEW_SETTINGS')
elif os.path.exists(os.path.join(os.getcwd(), 'settings.py')):
config.from_pyfile('settings.py')
else:
msg = ("Unable to load configuration file. Please "
"either create ./settings.py or set LINTREVIEW_SETTINGS "
"in your environment before running.")
raise ImportError(msg)
if config.get('LOGGING_CONFIG'):
logging.config.fileConfig(
config.get('LOGGING_CONFIG'),
disable_existing_loggers=False)
if config.get('SSL_CA_BUNDLE'):
os.environ['REQUESTS_CA_BUNDLE'] = config.get('SSL_CA_BUNDLE')
return config
def get_lintrc_defaults(config):
"""
Load the default lintrc, if it exists
"""
if config.get('LINTRC_DEFAULTS'):
with open(config.get('LINTRC_DEFAULTS')) as f:
return f.read()
def build_review_config(ini_config, app_config=None):
"""
Build a new ReviewConfig object using the ini config file
and the defaults if they exist in the app_config
"""
config = ReviewConfig()
if app_config:
defaults = get_lintrc_defaults(app_config)
if defaults:
config.load_ini(defaults)
config.load_ini(ini_config)
return config
def comma_value(values):
return map(lambda x: x.strip(), values.split(','))
def newline_value(values):
return map(lambda x: x.strip(), values.split('\n'))
class ReviewConfig(object):
"""
Provides a domain level API to a repositories
.lintrc file. Allows reading tool names and tool configuration
"""
def __init__(self, data=None):
self._data = {}
if data:
self._data = data
def update(self, data):
"""
Does a shallow merge of configuration settings.
This allows repos to control entire tool config by only
defining the keys they want. If we did a recursive merge, the
user config file would have to 'undo' our default file changes.
The one exception is that if the new data has
empty config, and the current data has non-empty config, the
non-empty config will be retained.
"""
for key, value in data.iteritems():
if key == 'linters' and 'linters' in self._data:
self._update_linter_config(value)
else:
self._data[key] = value
def _update_linter_config(self, linter_config):
"""
Update linter config.
Because linter config is a nested structure, it needs to be
updated in a somewhat recursive way.
"""
for linter, tool_config in linter_config.iteritems():
if self._config_update(linter, tool_config):
self._data['linters'][linter] = tool_config
def _config_update(self, linter, tool_config):
if linter not in self.linters():
return True
existing = self.linter_config(linter)
if tool_config == {} and existing != {}:
return False
return True
def linters(self):
try:
return self._data['linters'].keys()
except:
return []
def linter_config(self, tool):
try:
return self._data['linters'][tool]
except:
return {}
def ignore_patterns(self):
try:
return self._data['files']['ignore']
except:
return []
def ignore_branches(self):
try:
return self._data['branches']['ignore']
except:
return []
def load_ini(self, ini_config):
"""
Read the provided ini contents arguments and merge
the data in the ini config into the config object.
ini_config is assumed to be a string of the ini file contents.
"""
parser = ConfigParser()
parser.readfp(StringIO(ini_config))
data = {
'linters': {},
'files': {},
'branches': {},
}
if parser.has_section('files'):
ignore = parser.get('files', 'ignore')
data['files']['ignore'] = newline_value(ignore)
if parser.has_section('branches'):
ignore = parser.get('branches', 'ignore')
data['branches']['ignore'] = comma_value(ignore)
linters = []
if parser.has_section('tools'):
linters = comma_value(parser.get('tools', 'linters'))
# Setup empty config sections
for linter in linters:
data['linters'][linter] = {}
for section in parser.sections():
if not section.startswith('tool_'):
continue
# Strip off tool_
linter = section[5:]
data['linters'][linter] = dict(parser.items(section))
self.update(data)
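# Illustrative sketch (not part of the original module, never called by it):
# a .lintrc in the format consumed by ReviewConfig.load_ini() above. The tool
# names and ignore paths below are made-up examples.
def _example_review_config():
    ini = ("[tools]\n"
           "linters = flake8, jshint\n"
           "[files]\n"
           "ignore = vendor/*\n"
           "    build/*\n"
           "[tool_flake8]\n"
           "max-line-length = 100\n")
    config = build_review_config(ini)
    # linters() contains 'flake8' and 'jshint';
    # ignore_patterns() == ['vendor/*', 'build/*']
    return config.linters(), config.ignore_patterns()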
| mit | -6,261,815,237,069,691,000 | 29.647059 | 72 | 0.583301 | false |
robfig/rietveld | third_party/oauth2client/multistore_file.py | 52 | 12045 | # Copyright 2011 Google Inc. All Rights Reserved.
"""Multi-credential file store with lock support.
This module implements a JSON credential store where multiple
credentials can be stored in one file. That file supports locking
both in a single process and across processes.
The credential themselves are keyed off of:
* client_id
* user_agent
* scope
The format of the stored data is like so:
{
'file_version': 1,
'data': [
{
'key': {
'clientId': '<client id>',
'userAgent': '<user agent>',
'scope': '<scope>'
},
'credential': {
# JSON serialized Credentials.
}
}
]
}
"""
__author__ = '[email protected] (Joe Beda)'
import base64
import errno
import logging
import os
import threading
from anyjson import simplejson
from oauth2client.client import Storage as BaseStorage
from oauth2client.client import Credentials
from oauth2client import util
from locked_file import LockedFile
logger = logging.getLogger(__name__)
# A dict from 'filename'->_MultiStore instances
_multistores = {}
_multistores_lock = threading.Lock()
class Error(Exception):
"""Base error for this module."""
pass
class NewerCredentialStoreError(Error):
"""The credential store is a newer version that supported."""
pass
@util.positional(4)
def get_credential_storage(filename, client_id, user_agent, scope,
warn_on_readonly=True):
"""Get a Storage instance for a credential.
Args:
filename: The JSON file storing a set of credentials
client_id: The client_id for the credential
user_agent: The user agent for the credential
scope: string or iterable of strings, Scope(s) being requested
warn_on_readonly: if True, log a warning if the store is readonly
Returns:
An object derived from client.Storage for getting/setting the
credential.
"""
# Recreate the legacy key with these specific parameters
key = {'clientId': client_id, 'userAgent': user_agent,
'scope': util.scopes_to_string(scope)}
return get_credential_storage_custom_key(
filename, key, warn_on_readonly=warn_on_readonly)
@util.positional(2)
def get_credential_storage_custom_string_key(
filename, key_string, warn_on_readonly=True):
"""Get a Storage instance for a credential using a single string as a key.
Allows you to provide a string as a custom key that will be used for
credential storage and retrieval.
Args:
filename: The JSON file storing a set of credentials
key_string: A string to use as the key for storing this credential.
warn_on_readonly: if True, log a warning if the store is readonly
Returns:
An object derived from client.Storage for getting/setting the
credential.
"""
# Create a key dictionary that can be used
key_dict = {'key': key_string}
return get_credential_storage_custom_key(
filename, key_dict, warn_on_readonly=warn_on_readonly)
@util.positional(2)
def get_credential_storage_custom_key(
filename, key_dict, warn_on_readonly=True):
"""Get a Storage instance for a credential using a dictionary as a key.
Allows you to provide a dictionary as a custom key that will be used for
credential storage and retrieval.
Args:
filename: The JSON file storing a set of credentials
key_dict: A dictionary to use as the key for storing this credential. There
is no ordering of the keys in the dictionary. Logically equivalent
dictionaries will produce equivalent storage keys.
warn_on_readonly: if True, log a warning if the store is readonly
Returns:
An object derived from client.Storage for getting/setting the
credential.
"""
filename = os.path.expanduser(filename)
_multistores_lock.acquire()
try:
multistore = _multistores.setdefault(
filename, _MultiStore(filename, warn_on_readonly=warn_on_readonly))
finally:
_multistores_lock.release()
key = util.dict_to_tuple_key(key_dict)
return multistore._get_storage(key)
class _MultiStore(object):
"""A file backed store for multiple credentials."""
@util.positional(2)
def __init__(self, filename, warn_on_readonly=True):
"""Initialize the class.
This will create the file if necessary.
"""
self._file = LockedFile(filename, 'r+b', 'rb')
self._thread_lock = threading.Lock()
self._read_only = False
self._warn_on_readonly = warn_on_readonly
self._create_file_if_needed()
# Cache of deserialized store. This is only valid after the
# _MultiStore is locked or _refresh_data_cache is called. This is
# of the form of:
#
# ((key, value), (key, value)...) -> OAuth2Credential
#
# If this is None, then the store hasn't been read yet.
self._data = None
class _Storage(BaseStorage):
"""A Storage object that knows how to read/write a single credential."""
def __init__(self, multistore, key):
self._multistore = multistore
self._key = key
def acquire_lock(self):
"""Acquires any lock necessary to access this Storage.
This lock is not reentrant.
"""
self._multistore._lock()
def release_lock(self):
"""Release the Storage lock.
Trying to release a lock that isn't held will result in a
RuntimeError.
"""
self._multistore._unlock()
def locked_get(self):
"""Retrieve credential.
The Storage lock must be held when this is called.
Returns:
oauth2client.client.Credentials
"""
credential = self._multistore._get_credential(self._key)
if credential:
credential.set_store(self)
return credential
def locked_put(self, credentials):
"""Write a credential.
The Storage lock must be held when this is called.
Args:
credentials: Credentials, the credentials to store.
"""
self._multistore._update_credential(self._key, credentials)
def locked_delete(self):
"""Delete a credential.
The Storage lock must be held when this is called.
Args:
credentials: Credentials, the credentials to store.
"""
self._multistore._delete_credential(self._key)
def _create_file_if_needed(self):
"""Create an empty file if necessary.
This method will not initialize the file. Instead it implements a
simple version of "touch" to ensure the file has been created.
"""
if not os.path.exists(self._file.filename()):
old_umask = os.umask(0177)
try:
open(self._file.filename(), 'a+b').close()
finally:
os.umask(old_umask)
def _lock(self):
"""Lock the entire multistore."""
self._thread_lock.acquire()
self._file.open_and_lock()
if not self._file.is_locked():
self._read_only = True
if self._warn_on_readonly:
logger.warn('The credentials file (%s) is not writable. Opening in '
'read-only mode. Any refreshed credentials will only be '
'valid for this run.' % self._file.filename())
if os.path.getsize(self._file.filename()) == 0:
logger.debug('Initializing empty multistore file')
# The multistore is empty so write out an empty file.
self._data = {}
self._write()
elif not self._read_only or self._data is None:
# Only refresh the data if we are read/write or we haven't
      # cached the data yet. If we are readonly, we assume it isn't
# changing out from under us and that we only have to read it
# once. This prevents us from whacking any new access keys that
# we have cached in memory but were unable to write out.
self._refresh_data_cache()
def _unlock(self):
"""Release the lock on the multistore."""
self._file.unlock_and_close()
self._thread_lock.release()
def _locked_json_read(self):
"""Get the raw content of the multistore file.
The multistore must be locked when this is called.
Returns:
The contents of the multistore decoded as JSON.
"""
assert self._thread_lock.locked()
self._file.file_handle().seek(0)
return simplejson.load(self._file.file_handle())
def _locked_json_write(self, data):
"""Write a JSON serializable data structure to the multistore.
The multistore must be locked when this is called.
Args:
data: The data to be serialized and written.
"""
assert self._thread_lock.locked()
if self._read_only:
return
self._file.file_handle().seek(0)
simplejson.dump(data, self._file.file_handle(), sort_keys=True, indent=2)
self._file.file_handle().truncate()
def _refresh_data_cache(self):
"""Refresh the contents of the multistore.
The multistore must be locked when this is called.
Raises:
NewerCredentialStoreError: Raised when a newer client has written the
store.
"""
self._data = {}
try:
raw_data = self._locked_json_read()
except Exception:
logger.warn('Credential data store could not be loaded. '
'Will ignore and overwrite.')
return
version = 0
try:
version = raw_data['file_version']
except Exception:
logger.warn('Missing version for credential data store. It may be '
'corrupt or an old version. Overwriting.')
if version > 1:
raise NewerCredentialStoreError(
'Credential file has file_version of %d. '
'Only file_version of 1 is supported.' % version)
credentials = []
try:
credentials = raw_data['data']
except (TypeError, KeyError):
pass
for cred_entry in credentials:
try:
(key, credential) = self._decode_credential_from_json(cred_entry)
self._data[key] = credential
      except Exception:
# If something goes wrong loading a credential, just ignore it
logger.info('Error decoding credential, skipping', exc_info=True)
def _decode_credential_from_json(self, cred_entry):
"""Load a credential from our JSON serialization.
Args:
cred_entry: A dict entry from the data member of our format
Returns:
(key, cred) where the key is the key tuple and the cred is the
OAuth2Credential object.
"""
raw_key = cred_entry['key']
key = util.dict_to_tuple_key(raw_key)
credential = None
credential = Credentials.new_from_json(simplejson.dumps(cred_entry['credential']))
return (key, credential)
def _write(self):
"""Write the cached data back out.
The multistore must be locked.
"""
raw_data = {'file_version': 1}
raw_creds = []
raw_data['data'] = raw_creds
for (cred_key, cred) in self._data.items():
raw_key = dict(cred_key)
raw_cred = simplejson.loads(cred.to_json())
raw_creds.append({'key': raw_key, 'credential': raw_cred})
self._locked_json_write(raw_data)
def _get_credential(self, key):
"""Get a credential from the multistore.
The multistore must be locked.
Args:
key: The key used to retrieve the credential
Returns:
The credential specified or None if not present
"""
return self._data.get(key, None)
def _update_credential(self, key, cred):
"""Update a credential and write the multistore.
This must be called when the multistore is locked.
Args:
key: The key used to retrieve the credential
cred: The OAuth2Credential to update/set
"""
self._data[key] = cred
self._write()
def _delete_credential(self, key):
"""Delete a credential and write the multistore.
This must be called when the multistore is locked.
Args:
key: The key used to retrieve the credential
"""
try:
del self._data[key]
except KeyError:
pass
self._write()
def _get_storage(self, key):
"""Get a Storage object to get/set a credential.
This Storage is a 'view' into the multistore.
Args:
key: The key used to retrieve the credential
Returns:
A Storage object that can be used to get/set this cred
"""
return self._Storage(self, key)
| apache-2.0 | 8,741,447,103,262,036,000 | 28.449878 | 86 | 0.661934 | false |
hzruandd/AutobahnPython | examples/twisted/wamp/pubsub/unsubscribe/backend.py | 8 | 2223 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import print_function
from os import environ
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
"""
An application component that publishes an event every second.
"""
@inlineCallbacks
def onJoin(self, details):
print("session attached")
counter = 0
while True:
print("publish: com.myapp.topic1", counter)
self.publish(u'com.myapp.topic1', counter)
counter += 1
yield sleep(1)
if __name__ == '__main__':
runner = ApplicationRunner(
environ.get("AUTOBAHN_DEMO_ROUTER", u"ws://127.0.0.1:8080/ws"),
u"crossbardemo",
debug_wamp=False, # optional; log many WAMP details
debug=False, # optional; log even more details
)
runner.run(Component)
| mit | -2,893,989,510,565,218,300 | 36.05 | 79 | 0.665317 | false |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/scipy/sparse/csgraph/tests/test_graph_laplacian.py | 26 | 4388 | # Author: Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_allclose, assert_array_almost_equal
from pytest import raises as assert_raises
from scipy import sparse
from scipy.sparse import csgraph
def _explicit_laplacian(x, normed=False):
if sparse.issparse(x):
x = x.todense()
x = np.asarray(x)
y = -1.0 * x
for j in range(y.shape[0]):
y[j,j] = x[j,j+1:].sum() + x[j,:j].sum()
if normed:
d = np.diag(y).copy()
d[d == 0] = 1.0
y /= d[:,None]**.5
y /= d[None,:]**.5
return y
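# Illustrative sketch (not part of the original test module, never collected
# by the test runner): the explicit unnormalized Laplacian L = D - A of a
# 3-node path graph, to make the helper above concrete.
def _example_path_graph_laplacian():
    adjacency = np.array([[0, 1, 0],
                          [1, 0, 1],
                          [0, 1, 0]])
    expected = np.array([[1, -1, 0],
                         [-1, 2, -1],
                         [0, -1, 1]])
    assert np.allclose(_explicit_laplacian(adjacency, normed=False), expected)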
def _check_symmetric_graph_laplacian(mat, normed):
if not hasattr(mat, 'shape'):
mat = eval(mat, dict(np=np, sparse=sparse))
if sparse.issparse(mat):
sp_mat = mat
mat = sp_mat.todense()
else:
sp_mat = sparse.csr_matrix(mat)
laplacian = csgraph.laplacian(mat, normed=normed)
n_nodes = mat.shape[0]
if not normed:
assert_array_almost_equal(laplacian.sum(axis=0), np.zeros(n_nodes))
assert_array_almost_equal(laplacian.T, laplacian)
assert_array_almost_equal(laplacian,
csgraph.laplacian(sp_mat, normed=normed).todense())
assert_array_almost_equal(laplacian,
_explicit_laplacian(mat, normed=normed))
def test_laplacian_value_error():
for t in int, float, complex:
for m in ([1, 1],
[[[1]]],
[[1, 2, 3], [4, 5, 6]],
[[1, 2], [3, 4], [5, 5]]):
A = np.array(m, dtype=t)
assert_raises(ValueError, csgraph.laplacian, A)
def test_symmetric_graph_laplacian():
symmetric_mats = ('np.arange(10) * np.arange(10)[:, np.newaxis]',
'np.ones((7, 7))',
'np.eye(19)',
'sparse.diags([1, 1], [-1, 1], shape=(4,4))',
'sparse.diags([1, 1], [-1, 1], shape=(4,4)).todense()',
'np.asarray(sparse.diags([1, 1], [-1, 1], shape=(4,4)).todense())',
'np.vander(np.arange(4)) + np.vander(np.arange(4)).T')
for mat_str in symmetric_mats:
for normed in True, False:
_check_symmetric_graph_laplacian(mat_str, normed)
def _assert_allclose_sparse(a, b, **kwargs):
# helper function that can deal with sparse matrices
if sparse.issparse(a):
a = a.toarray()
if sparse.issparse(b):
        b = b.toarray()
assert_allclose(a, b, **kwargs)
def _check_laplacian(A, desired_L, desired_d, normed, use_out_degree):
for arr_type in np.array, sparse.csr_matrix, sparse.coo_matrix:
for t in int, float, complex:
adj = arr_type(A, dtype=t)
L = csgraph.laplacian(adj, normed=normed, return_diag=False,
use_out_degree=use_out_degree)
_assert_allclose_sparse(L, desired_L, atol=1e-12)
L, d = csgraph.laplacian(adj, normed=normed, return_diag=True,
use_out_degree=use_out_degree)
_assert_allclose_sparse(L, desired_L, atol=1e-12)
_assert_allclose_sparse(d, desired_d, atol=1e-12)
def test_asymmetric_laplacian():
# adjacency matrix
A = [[0, 1, 0],
[4, 2, 0],
[0, 0, 0]]
# Laplacian matrix using out-degree
L = [[1, -1, 0],
[-4, 4, 0],
[0, 0, 0]]
d = [1, 4, 0]
_check_laplacian(A, L, d, normed=False, use_out_degree=True)
# normalized Laplacian matrix using out-degree
L = [[1, -0.5, 0],
[-2, 1, 0],
[0, 0, 0]]
d = [1, 2, 1]
_check_laplacian(A, L, d, normed=True, use_out_degree=True)
# Laplacian matrix using in-degree
L = [[4, -1, 0],
[-4, 1, 0],
[0, 0, 0]]
d = [4, 1, 0]
_check_laplacian(A, L, d, normed=False, use_out_degree=False)
# normalized Laplacian matrix using in-degree
L = [[1, -0.5, 0],
[-2, 1, 0],
[0, 0, 0]]
d = [2, 1, 1]
_check_laplacian(A, L, d, normed=True, use_out_degree=False)
def test_sparse_formats():
for fmt in ('csr', 'csc', 'coo', 'lil', 'dok', 'dia', 'bsr'):
mat = sparse.diags([1, 1], [-1, 1], shape=(4,4), format=fmt)
for normed in True, False:
_check_symmetric_graph_laplacian(mat, normed)
| gpl-3.0 | -4,008,578,468,169,761,000 | 31.264706 | 79 | 0.553099 | false |
upgoingstar/datasploit | emails/email_scribd.py | 2 | 1462 | #!/usr/bin/env python
import base
import re
import sys
import requests
from termcolor import colored
# Control whether the module is enabled or not
ENABLED = True
class style:
BOLD = '\033[1m'
END = '\033[0m'
def banner():
print colored(style.BOLD + '\n---> Searching Scribd Docs\n' + style.END, 'blue')
def main(email):
req = requests.get('https://www.scribd.com/search?page=1&content_type=documents&query=%s' % (email))
m = re.findall('(?<=https://www.scribd.com/doc/)\w+', req.text.encode('UTF-8'))
m = set(m)
m = list(m)
links = []
length = len(m)
    for lt in range(length):
links.append("https://www.scribd.com/doc/" + m[lt])
return links
def output(data, email=""):
if data:
print "Found %s associated SCRIBD documents:\n" % len(data)
for link in data:
print link
print ""
print colored(style.BOLD + 'More results might be available, please follow this link:' + style.END)
print "https://www.scribd.com/search?page=1&content_type=documents&query=" + email
else:
print colored('[-] No Associated Scribd Documents found.', 'red')
if __name__ == "__main__":
try:
email = sys.argv[1]
banner()
result = main(email)
output(result, email)
except Exception as e:
print e
print "Please provide an email as argument"
| gpl-3.0 | -6,765,973,988,487,786,000 | 24.581818 | 107 | 0.582763 | false |
tumbl3w33d/ansible | test/lib/ansible_test/_internal/manage_ci.py | 17 | 10692 | """Access Ansible Core CI remote services."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import tempfile
import time
from .util import (
SubprocessError,
ApplicationError,
cmd_quote,
display,
ANSIBLE_TEST_DATA_ROOT,
)
from .util_common import (
intercept_command,
run_command,
)
from .core_ci import (
AnsibleCoreCI,
)
from .ansible_util import (
ansible_environment,
)
from .config import (
ShellConfig,
)
from .payload import (
create_payload,
)
class ManageWindowsCI:
"""Manage access to a Windows instance provided by Ansible Core CI."""
def __init__(self, core_ci):
"""
:type core_ci: AnsibleCoreCI
"""
self.core_ci = core_ci
self.ssh_args = ['-i', self.core_ci.ssh_key.key]
ssh_options = dict(
BatchMode='yes',
StrictHostKeyChecking='no',
UserKnownHostsFile='/dev/null',
ServerAliveInterval=15,
ServerAliveCountMax=4,
)
for ssh_option in sorted(ssh_options):
self.ssh_args += ['-o', '%s=%s' % (ssh_option, ssh_options[ssh_option])]
def setup(self, python_version):
"""Used in delegate_remote to setup the host, no action is required for Windows.
:type python_version: str
"""
def wait(self):
"""Wait for instance to respond to ansible ping."""
extra_vars = [
'ansible_connection=winrm',
'ansible_host=%s' % self.core_ci.connection.hostname,
'ansible_user=%s' % self.core_ci.connection.username,
'ansible_password=%s' % self.core_ci.connection.password,
'ansible_port=%s' % self.core_ci.connection.port,
'ansible_winrm_server_cert_validation=ignore',
]
name = 'windows_%s' % self.core_ci.version
env = ansible_environment(self.core_ci.args)
cmd = ['ansible', '-m', 'win_ping', '-i', '%s,' % name, name, '-e', ' '.join(extra_vars)]
for dummy in range(1, 120):
try:
intercept_command(self.core_ci.args, cmd, 'ping', env=env, disable_coverage=True)
return
except SubprocessError:
time.sleep(10)
raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
(self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id))
def download(self, remote, local):
"""
:type remote: str
:type local: str
"""
self.scp('%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote), local)
def upload(self, local, remote):
"""
:type local: str
:type remote: str
"""
self.scp(local, '%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote))
def ssh(self, command, options=None, force_pty=True):
"""
:type command: str | list[str]
:type options: list[str] | None
:type force_pty: bool
"""
if not options:
options = []
if force_pty:
options.append('-tt')
if isinstance(command, list):
command = ' '.join(cmd_quote(c) for c in command)
run_command(self.core_ci.args,
['ssh', '-q'] + self.ssh_args +
options +
['-p', '22',
'%s@%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname)] +
[command])
def scp(self, src, dst):
"""
:type src: str
:type dst: str
"""
for dummy in range(1, 10):
try:
run_command(self.core_ci.args,
['scp'] + self.ssh_args +
['-P', '22', '-q', '-r', src, dst])
return
except SubprocessError:
time.sleep(10)
raise ApplicationError('Failed transfer: %s -> %s' % (src, dst))
class ManageNetworkCI:
"""Manage access to a network instance provided by Ansible Core CI."""
def __init__(self, core_ci):
"""
:type core_ci: AnsibleCoreCI
"""
self.core_ci = core_ci
def wait(self):
"""Wait for instance to respond to ansible ping."""
extra_vars = [
'ansible_host=%s' % self.core_ci.connection.hostname,
'ansible_port=%s' % self.core_ci.connection.port,
'ansible_connection=local',
'ansible_ssh_private_key_file=%s' % self.core_ci.ssh_key.key,
]
name = '%s-%s' % (self.core_ci.platform, self.core_ci.version.replace('.', '-'))
env = ansible_environment(self.core_ci.args)
cmd = [
'ansible',
'-m', '%s_command' % self.core_ci.platform,
'-a', 'commands=?',
'-u', self.core_ci.connection.username,
'-i', '%s,' % name,
'-e', ' '.join(extra_vars),
name,
]
for dummy in range(1, 90):
try:
intercept_command(self.core_ci.args, cmd, 'ping', env=env, disable_coverage=True)
return
except SubprocessError:
time.sleep(10)
raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
(self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id))
class ManagePosixCI:
"""Manage access to a POSIX instance provided by Ansible Core CI."""
def __init__(self, core_ci):
"""
:type core_ci: AnsibleCoreCI
"""
self.core_ci = core_ci
self.ssh_args = ['-i', self.core_ci.ssh_key.key]
ssh_options = dict(
BatchMode='yes',
StrictHostKeyChecking='no',
UserKnownHostsFile='/dev/null',
ServerAliveInterval=15,
ServerAliveCountMax=4,
)
for ssh_option in sorted(ssh_options):
self.ssh_args += ['-o', '%s=%s' % (ssh_option, ssh_options[ssh_option])]
if self.core_ci.platform == 'freebsd':
if self.core_ci.provider == 'aws':
self.become = ['su', '-l', 'root', '-c']
elif self.core_ci.provider == 'azure':
self.become = ['sudo', '-in', 'sh', '-c']
else:
raise NotImplementedError('provider %s has not been implemented' % self.core_ci.provider)
elif self.core_ci.platform == 'osx':
self.become = ['sudo', '-in', 'PATH=/usr/local/bin:$PATH']
elif self.core_ci.platform == 'rhel':
self.become = ['sudo', '-in', 'bash', '-c']
def setup(self, python_version):
"""Start instance and wait for it to become ready and respond to an ansible ping.
:type python_version: str
:rtype: str
"""
pwd = self.wait()
display.info('Remote working directory: %s' % pwd, verbosity=1)
if isinstance(self.core_ci.args, ShellConfig):
if self.core_ci.args.raw:
return pwd
self.configure(python_version)
self.upload_source()
return pwd
def wait(self): # type: () -> str
"""Wait for instance to respond to SSH."""
for dummy in range(1, 90):
try:
stdout = self.ssh('pwd', capture=True)[0]
if self.core_ci.args.explain:
return '/pwd'
pwd = stdout.strip().splitlines()[-1]
if not pwd.startswith('/'):
raise Exception('Unexpected current working directory "%s" from "pwd" command output:\n%s' % (pwd, stdout))
return pwd
except SubprocessError:
time.sleep(10)
raise ApplicationError('Timeout waiting for %s/%s instance %s.' %
(self.core_ci.platform, self.core_ci.version, self.core_ci.instance_id))
def configure(self, python_version):
"""Configure remote host for testing.
:type python_version: str
"""
self.upload(os.path.join(ANSIBLE_TEST_DATA_ROOT, 'setup', 'remote.sh'), '/tmp')
self.ssh('chmod +x /tmp/remote.sh && /tmp/remote.sh %s %s' % (self.core_ci.platform, python_version))
def upload_source(self):
"""Upload and extract source."""
with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as local_source_fd:
remote_source_dir = '/tmp'
remote_source_path = os.path.join(remote_source_dir, os.path.basename(local_source_fd.name))
create_payload(self.core_ci.args, local_source_fd.name)
self.upload(local_source_fd.name, remote_source_dir)
self.ssh('rm -rf ~/ansible && mkdir ~/ansible && cd ~/ansible && tar oxzf %s' % remote_source_path)
def download(self, remote, local):
"""
:type remote: str
:type local: str
"""
self.scp('%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote), local)
def upload(self, local, remote):
"""
:type local: str
:type remote: str
"""
self.scp(local, '%s@%s:%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname, remote))
def ssh(self, command, options=None, capture=False):
"""
:type command: str | list[str]
:type options: list[str] | None
:type capture: bool
:rtype: str | None, str | None
"""
if not options:
options = []
if isinstance(command, list):
command = ' '.join(cmd_quote(c) for c in command)
return run_command(self.core_ci.args,
['ssh', '-tt', '-q'] + self.ssh_args +
options +
['-p', str(self.core_ci.connection.port),
'%s@%s' % (self.core_ci.connection.username, self.core_ci.connection.hostname)] +
self.become + [cmd_quote(command)], capture=capture)
def scp(self, src, dst):
"""
:type src: str
:type dst: str
"""
for dummy in range(1, 10):
try:
run_command(self.core_ci.args,
['scp'] + self.ssh_args +
['-P', str(self.core_ci.connection.port), '-q', '-r', src, dst])
return
except SubprocessError:
time.sleep(10)
raise ApplicationError('Failed transfer: %s -> %s' % (src, dst))
| gpl-3.0 | 5,801,061,022,044,204,000 | 32.4125 | 127 | 0.528152 | false |
mnahm5/django-estore | Lib/site-packages/cryptography/hazmat/primitives/asymmetric/rsa.py | 17 | 10226 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
from fractions import gcd
import six
from cryptography import utils
from cryptography.exceptions import UnsupportedAlgorithm, _Reasons
from cryptography.hazmat.backends.interfaces import RSABackend
@six.add_metaclass(abc.ABCMeta)
class RSAPrivateKey(object):
@abc.abstractmethod
def signer(self, padding, algorithm):
"""
Returns an AsymmetricSignatureContext used for signing data.
"""
@abc.abstractmethod
def decrypt(self, ciphertext, padding):
"""
Decrypts the provided ciphertext.
"""
@abc.abstractproperty
def key_size(self):
"""
The bit length of the public modulus.
"""
@abc.abstractmethod
def public_key(self):
"""
The RSAPublicKey associated with this private key.
"""
@abc.abstractmethod
def sign(self, data, padding, algorithm):
"""
Signs the data.
"""
@six.add_metaclass(abc.ABCMeta)
class RSAPrivateKeyWithSerialization(RSAPrivateKey):
@abc.abstractmethod
def private_numbers(self):
"""
Returns an RSAPrivateNumbers.
"""
@abc.abstractmethod
def private_bytes(self, encoding, format, encryption_algorithm):
"""
Returns the key serialized as bytes.
"""
@six.add_metaclass(abc.ABCMeta)
class RSAPublicKey(object):
@abc.abstractmethod
def verifier(self, signature, padding, algorithm):
"""
Returns an AsymmetricVerificationContext used for verifying signatures.
"""
@abc.abstractmethod
def encrypt(self, plaintext, padding):
"""
Encrypts the given plaintext.
"""
@abc.abstractproperty
def key_size(self):
"""
The bit length of the public modulus.
"""
@abc.abstractmethod
def public_numbers(self):
"""
Returns an RSAPublicNumbers
"""
@abc.abstractmethod
def public_bytes(self, encoding, format):
"""
Returns the key serialized as bytes.
"""
@abc.abstractmethod
def verify(self, signature, data, padding, algorithm):
"""
Verifies the signature of the data.
"""
RSAPublicKeyWithSerialization = RSAPublicKey
def generate_private_key(public_exponent, key_size, backend):
if not isinstance(backend, RSABackend):
raise UnsupportedAlgorithm(
"Backend object does not implement RSABackend.",
_Reasons.BACKEND_MISSING_INTERFACE
)
_verify_rsa_parameters(public_exponent, key_size)
return backend.generate_rsa_private_key(public_exponent, key_size)
def _verify_rsa_parameters(public_exponent, key_size):
if public_exponent < 3:
raise ValueError("public_exponent must be >= 3.")
if public_exponent & 1 == 0:
raise ValueError("public_exponent must be odd.")
if key_size < 512:
raise ValueError("key_size must be at least 512-bits.")
def _check_private_key_components(p, q, private_exponent, dmp1, dmq1, iqmp,
public_exponent, modulus):
if modulus < 3:
raise ValueError("modulus must be >= 3.")
if p >= modulus:
raise ValueError("p must be < modulus.")
if q >= modulus:
raise ValueError("q must be < modulus.")
if dmp1 >= modulus:
raise ValueError("dmp1 must be < modulus.")
if dmq1 >= modulus:
raise ValueError("dmq1 must be < modulus.")
if iqmp >= modulus:
raise ValueError("iqmp must be < modulus.")
if private_exponent >= modulus:
raise ValueError("private_exponent must be < modulus.")
if public_exponent < 3 or public_exponent >= modulus:
raise ValueError("public_exponent must be >= 3 and < modulus.")
if public_exponent & 1 == 0:
raise ValueError("public_exponent must be odd.")
if dmp1 & 1 == 0:
raise ValueError("dmp1 must be odd.")
if dmq1 & 1 == 0:
raise ValueError("dmq1 must be odd.")
if p * q != modulus:
raise ValueError("p*q must equal modulus.")
def _check_public_key_components(e, n):
if n < 3:
raise ValueError("n must be >= 3.")
if e < 3 or e >= n:
raise ValueError("e must be >= 3 and < n.")
if e & 1 == 0:
raise ValueError("e must be odd.")
def _modinv(e, m):
"""
Modular Multiplicative Inverse. Returns x such that: (x*e) mod m == 1
"""
x1, y1, x2, y2 = 1, 0, 0, 1
a, b = e, m
while b > 0:
q, r = divmod(a, b)
xn, yn = x1 - q * x2, y1 - q * y2
a, b, x1, y1, x2, y2 = b, r, x2, y2, xn, yn
return x1 % m
def rsa_crt_iqmp(p, q):
"""
Compute the CRT (q ** -1) % p value from RSA primes p and q.
"""
return _modinv(q, p)
def rsa_crt_dmp1(private_exponent, p):
"""
Compute the CRT private_exponent % (p - 1) value from the RSA
private_exponent (d) and p.
"""
return private_exponent % (p - 1)
def rsa_crt_dmq1(private_exponent, q):
"""
Compute the CRT private_exponent % (q - 1) value from the RSA
private_exponent (d) and q.
"""
return private_exponent % (q - 1)
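# Illustrative sketch (not part of the original module, never called by the
# library): the CRT helpers above applied to the textbook toy key
# p=61, q=53, e=17. The numbers are for demonstration only.
def _example_crt_parameters():
    p, q, e = 61, 53, 17
    d = _modinv(e, (p - 1) * (q - 1))   # 2753, since (2753 * 17) % 3120 == 1
    return (rsa_crt_iqmp(p, q),         # (q ** -1) % p  == 38
            rsa_crt_dmp1(d, p),         # d % (p - 1)    == 53
            rsa_crt_dmq1(d, q))         # d % (q - 1)    == 49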
# Controls the number of iterations rsa_recover_prime_factors will perform
# to obtain the prime factors. Each iteration increments by 2 so the actual
# maximum attempts is half this number.
_MAX_RECOVERY_ATTEMPTS = 1000
def rsa_recover_prime_factors(n, e, d):
"""
Compute factors p and q from the private exponent d. We assume that n has
no more than two factors. This function is adapted from code in PyCrypto.
"""
# See 8.2.2(i) in Handbook of Applied Cryptography.
ktot = d * e - 1
# The quantity d*e-1 is a multiple of phi(n), even,
# and can be represented as t*2^s.
t = ktot
while t % 2 == 0:
t = t // 2
# Cycle through all multiplicative inverses in Zn.
# The algorithm is non-deterministic, but there is a 50% chance
# any candidate a leads to successful factoring.
# See "Digitalized Signatures and Public Key Functions as Intractable
# as Factorization", M. Rabin, 1979
spotted = False
a = 2
while not spotted and a < _MAX_RECOVERY_ATTEMPTS:
k = t
# Cycle through all values a^{t*2^i}=a^k
while k < ktot:
cand = pow(a, k, n)
# Check if a^k is a non-trivial root of unity (mod n)
if cand != 1 and cand != (n - 1) and pow(cand, 2, n) == 1:
# We have found a number such that (cand-1)(cand+1)=0 (mod n).
# Either of the terms divides n.
p = gcd(cand + 1, n)
spotted = True
break
k *= 2
# This value was not any good... let's try another!
a += 2
if not spotted:
raise ValueError("Unable to compute factors p and q from exponent d.")
# Found !
q, r = divmod(n, p)
assert r == 0
p, q = sorted((p, q), reverse=True)
return (p, q)
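# Illustrative sketch (not part of the original module, never called by the
# library): recovering the same toy factors from (n, e, d) alone. The
# function is expected to return them largest-first, i.e. (61, 53).
def _example_recover_factors():
    n, e = 61 * 53, 17
    d = _modinv(e, 60 * 52)
    return rsa_recover_prime_factors(n, e, d)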
class RSAPrivateNumbers(object):
def __init__(self, p, q, d, dmp1, dmq1, iqmp,
public_numbers):
if (
not isinstance(p, six.integer_types) or
not isinstance(q, six.integer_types) or
not isinstance(d, six.integer_types) or
not isinstance(dmp1, six.integer_types) or
not isinstance(dmq1, six.integer_types) or
not isinstance(iqmp, six.integer_types)
):
raise TypeError(
"RSAPrivateNumbers p, q, d, dmp1, dmq1, iqmp arguments must"
" all be an integers."
)
if not isinstance(public_numbers, RSAPublicNumbers):
raise TypeError(
"RSAPrivateNumbers public_numbers must be an RSAPublicNumbers"
" instance."
)
self._p = p
self._q = q
self._d = d
self._dmp1 = dmp1
self._dmq1 = dmq1
self._iqmp = iqmp
self._public_numbers = public_numbers
p = utils.read_only_property("_p")
q = utils.read_only_property("_q")
d = utils.read_only_property("_d")
dmp1 = utils.read_only_property("_dmp1")
dmq1 = utils.read_only_property("_dmq1")
iqmp = utils.read_only_property("_iqmp")
public_numbers = utils.read_only_property("_public_numbers")
def private_key(self, backend):
return backend.load_rsa_private_numbers(self)
def __eq__(self, other):
if not isinstance(other, RSAPrivateNumbers):
return NotImplemented
return (
self.p == other.p and
self.q == other.q and
self.d == other.d and
self.dmp1 == other.dmp1 and
self.dmq1 == other.dmq1 and
self.iqmp == other.iqmp and
self.public_numbers == other.public_numbers
)
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((
self.p,
self.q,
self.d,
self.dmp1,
self.dmq1,
self.iqmp,
self.public_numbers,
))
class RSAPublicNumbers(object):
def __init__(self, e, n):
if (
not isinstance(e, six.integer_types) or
not isinstance(n, six.integer_types)
):
raise TypeError("RSAPublicNumbers arguments must be integers.")
self._e = e
self._n = n
e = utils.read_only_property("_e")
n = utils.read_only_property("_n")
def public_key(self, backend):
return backend.load_rsa_public_numbers(self)
def __repr__(self):
return "<RSAPublicNumbers(e={0.e}, n={0.n})>".format(self)
def __eq__(self, other):
if not isinstance(other, RSAPublicNumbers):
return NotImplemented
return self.e == other.e and self.n == other.n
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.e, self.n))
| mit | -8,585,657,132,831,152,000 | 27.093407 | 79 | 0.584882 | false |
defance/edx-platform | lms/djangoapps/ccx/tests/test_field_override_performance.py | 27 | 9659 | # coding=UTF-8
"""
Performance tests for field overrides.
"""
import ddt
import itertools
import mock
from nose.plugins.skip import SkipTest
from courseware.views import progress
from courseware.field_overrides import OverrideFieldData
from datetime import datetime
from django.conf import settings
from django.core.cache import caches
from django.test.client import RequestFactory
from django.test.utils import override_settings
from edxmako.middleware import MakoMiddleware
from nose.plugins.attrib import attr
from pytz import UTC
from request_cache.middleware import RequestCache
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from xblock.core import XBlock
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, \
TEST_DATA_SPLIT_MODULESTORE, TEST_DATA_MONGO_MODULESTORE
from xmodule.modulestore.tests.factories import check_mongo_calls_range, CourseFactory, check_sum_of_calls
from xmodule.modulestore.tests.utils import ProceduralCourseTestMixin
from ccx_keys.locator import CCXLocator
from lms.djangoapps.ccx.tests.factories import CcxFactory
@attr('shard_1')
@mock.patch.dict(
'django.conf.settings.FEATURES',
{
'ENABLE_XBLOCK_VIEW_ENDPOINT': True,
'ENABLE_MAX_SCORE_CACHE': False,
}
)
@ddt.ddt
class FieldOverridePerformanceTestCase(ProceduralCourseTestMixin,
ModuleStoreTestCase):
"""
Base class for instrumenting SQL queries and Mongo reads for field override
providers.
"""
__test__ = False
# TEST_DATA must be overridden by subclasses
TEST_DATA = None
def setUp(self):
"""
Create a test client, course, and user.
"""
super(FieldOverridePerformanceTestCase, self).setUp()
self.request_factory = RequestFactory()
self.student = UserFactory.create()
self.request = self.request_factory.get("foo")
self.request.user = self.student
self.course = None
self.ccx = None
MakoMiddleware().process_request(self.request)
def setup_course(self, size, enable_ccx, view_as_ccx):
"""
Build a gradable course where each node has `size` children.
"""
grading_policy = {
"GRADER": [
{
"drop_count": 2,
"min_count": 12,
"short_label": "HW",
"type": "Homework",
"weight": 0.15
},
{
"drop_count": 2,
"min_count": 12,
"type": "Lab",
"weight": 0.15
},
{
"drop_count": 0,
"min_count": 1,
"short_label": "Midterm",
"type": "Midterm Exam",
"weight": 0.3
},
{
"drop_count": 0,
"min_count": 1,
"short_label": "Final",
"type": "Final Exam",
"weight": 0.4
}
],
"GRADE_CUTOFFS": {
"Pass": 0.5
}
}
self.course = CourseFactory.create(
graded=True,
start=datetime.now(UTC),
grading_policy=grading_policy,
enable_ccx=enable_ccx,
)
self.populate_course(size)
course_key = self.course.id
if enable_ccx:
self.ccx = CcxFactory.create(course_id=self.course.id)
if view_as_ccx:
course_key = CCXLocator.from_course_locator(self.course.id, self.ccx.id)
CourseEnrollment.enroll(
self.student,
course_key
)
def grade_course(self, course, view_as_ccx):
"""
Renders the progress page for the given course.
"""
course_key = course.id
if view_as_ccx:
course_key = CCXLocator.from_course_locator(course_key, self.ccx.id)
return progress(
self.request,
course_id=unicode(course_key),
student_id=self.student.id
)
def assertMongoCallCount(self, calls):
"""
Assert that mongodb is queried ``calls`` times in the surrounded
context.
"""
return check_mongo_calls_range(max_finds=calls)
def assertXBlockInstantiations(self, instantiations):
"""
Assert that exactly ``instantiations`` XBlocks are instantiated in
the surrounded context.
"""
return check_sum_of_calls(XBlock, ['__init__'], instantiations, instantiations, include_arguments=False)
def instrument_course_progress_render(self, course_width, enable_ccx, view_as_ccx, queries, reads, xblocks):
"""
Renders the progress page, instrumenting Mongo reads and SQL queries.
"""
self.setup_course(course_width, enable_ccx, view_as_ccx)
# Switch to published-only mode to simulate the LMS
with self.settings(MODULESTORE_BRANCH='published-only'):
# Clear all caches before measuring
for cache in settings.CACHES:
caches[cache].clear()
# Refill the metadata inheritance cache
modulestore().get_course(self.course.id, depth=None)
# We clear the request cache to simulate a new request in the LMS.
RequestCache.clear_request_cache()
# Reset the list of provider classes, so that our django settings changes
# can actually take affect.
OverrideFieldData.provider_classes = None
with self.assertNumQueries(queries):
with self.assertMongoCallCount(reads):
with self.assertXBlockInstantiations(xblocks):
self.grade_course(self.course, view_as_ccx)
@ddt.data(*itertools.product(('no_overrides', 'ccx'), range(1, 4), (True, False), (True, False)))
@ddt.unpack
@override_settings(
FIELD_OVERRIDE_PROVIDERS=(),
)
def test_field_overrides(self, overrides, course_width, enable_ccx, view_as_ccx):
"""
Test without any field overrides.
"""
providers = {
'no_overrides': (),
'ccx': ('ccx.overrides.CustomCoursesForEdxOverrideProvider',)
}
if overrides == 'no_overrides' and view_as_ccx:
raise SkipTest("Can't view a ccx course if field overrides are disabled.")
if not enable_ccx and view_as_ccx:
raise SkipTest("Can't view a ccx course if ccx is disabled on the course")
if self.MODULESTORE == TEST_DATA_MONGO_MODULESTORE and view_as_ccx:
raise SkipTest("Can't use a MongoModulestore test as a CCX course")
with self.settings(FIELD_OVERRIDE_PROVIDERS=providers[overrides]):
queries, reads, xblocks = self.TEST_DATA[(overrides, course_width, enable_ccx, view_as_ccx)]
self.instrument_course_progress_render(course_width, enable_ccx, view_as_ccx, queries, reads, xblocks)
class TestFieldOverrideMongoPerformance(FieldOverridePerformanceTestCase):
"""
Test cases for instrumenting field overrides against the Mongo modulestore.
"""
MODULESTORE = TEST_DATA_MONGO_MODULESTORE
__test__ = True
TEST_DATA = {
# (providers, course_width, enable_ccx, view_as_ccx): # of sql queries, # of mongo queries, # of xblocks
('no_overrides', 1, True, False): (48, 6, 13),
('no_overrides', 2, True, False): (135, 6, 84),
('no_overrides', 3, True, False): (480, 6, 335),
('ccx', 1, True, False): (48, 6, 13),
('ccx', 2, True, False): (135, 6, 84),
('ccx', 3, True, False): (480, 6, 335),
('ccx', 1, True, True): (48, 6, 13),
('ccx', 2, True, True): (135, 6, 84),
('ccx', 3, True, True): (480, 6, 335),
('no_overrides', 1, False, False): (48, 6, 13),
('no_overrides', 2, False, False): (135, 6, 84),
('no_overrides', 3, False, False): (480, 6, 335),
('ccx', 1, False, False): (48, 6, 13),
('ccx', 2, False, False): (135, 6, 84),
('ccx', 3, False, False): (480, 6, 335),
('ccx', 1, False, True): (48, 6, 13),
('ccx', 2, False, True): (135, 6, 84),
('ccx', 3, False, True): (480, 6, 335),
}
class TestFieldOverrideSplitPerformance(FieldOverridePerformanceTestCase):
"""
Test cases for instrumenting field overrides against the Split modulestore.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
__test__ = True
TEST_DATA = {
('no_overrides', 1, True, False): (48, 4, 9),
('no_overrides', 2, True, False): (135, 19, 54),
('no_overrides', 3, True, False): (480, 84, 215),
('ccx', 1, True, False): (48, 4, 9),
('ccx', 2, True, False): (135, 19, 54),
('ccx', 3, True, False): (480, 84, 215),
('ccx', 1, True, True): (50, 4, 13),
('ccx', 2, True, True): (137, 19, 84),
('ccx', 3, True, True): (482, 84, 335),
('no_overrides', 1, False, False): (48, 4, 9),
('no_overrides', 2, False, False): (135, 19, 54),
('no_overrides', 3, False, False): (480, 84, 215),
('ccx', 1, False, False): (48, 4, 9),
('ccx', 2, False, False): (135, 19, 54),
('ccx', 3, False, False): (480, 84, 215),
('ccx', 1, False, True): (48, 4, 9),
('ccx', 2, False, True): (135, 19, 54),
('ccx', 3, False, True): (480, 84, 215),
}
| agpl-3.0 | 739,997,729,702,790,500 | 35.587121 | 114 | 0.572005 | false |
zaccoz/odoo | addons/web_diagram/controllers/main.py | 268 | 4321 | import openerp
from openerp.tools.safe_eval import safe_eval as eval
class DiagramView(openerp.http.Controller):
@openerp.http.route('/web_diagram/diagram/get_diagram_info', type='json', auth='user')
def get_diagram_info(self, req, id, model, node, connector,
src_node, des_node, label, **kw):
visible_node_fields = kw.get('visible_node_fields',[])
invisible_node_fields = kw.get('invisible_node_fields',[])
node_fields_string = kw.get('node_fields_string',[])
connector_fields = kw.get('connector_fields',[])
connector_fields_string = kw.get('connector_fields_string',[])
bgcolors = {}
shapes = {}
bgcolor = kw.get('bgcolor','')
shape = kw.get('shape','')
if bgcolor:
for color_spec in bgcolor.split(';'):
if color_spec:
colour, color_state = color_spec.split(':')
bgcolors[colour] = color_state
if shape:
for shape_spec in shape.split(';'):
if shape_spec:
shape_colour, shape_color_state = shape_spec.split(':')
shapes[shape_colour] = shape_color_state
ir_view = req.session.model('ir.ui.view')
graphs = ir_view.graph_get(
int(id), model, node, connector, src_node, des_node, label,
(140, 180), req.session.context)
nodes = graphs['nodes']
transitions = graphs['transitions']
isolate_nodes = {}
for blnk_node in graphs['blank_nodes']:
isolate_nodes[blnk_node['id']] = blnk_node
else:
y = map(lambda t: t['y'],filter(lambda x: x['y'] if x['x']==20 else None, nodes.values()))
y_max = (y and max(y)) or 120
connectors = {}
list_tr = []
for tr in transitions:
list_tr.append(tr)
connectors.setdefault(tr, {
'id': int(tr),
's_id': transitions[tr][0],
'd_id': transitions[tr][1]
})
connector_tr = req.session.model(connector)
connector_ids = connector_tr.search([('id', 'in', list_tr)], 0, 0, 0, req.session.context)
data_connectors =connector_tr.read(connector_ids, connector_fields, req.session.context)
for tr in data_connectors:
transition_id = str(tr['id'])
_sourceid, label = graphs['label'][transition_id]
t = connectors[transition_id]
t.update(
source=tr[src_node][1],
destination=tr[des_node][1],
options={},
signal=label
)
for i, fld in enumerate(connector_fields):
t['options'][connector_fields_string[i]] = tr[fld]
fields = req.session.model('ir.model.fields')
field_ids = fields.search([('model', '=', model), ('relation', '=', node)], 0, 0, 0, req.session.context)
field_data = fields.read(field_ids, ['relation_field'], req.session.context)
node_act = req.session.model(node)
search_acts = node_act.search([(field_data[0]['relation_field'], '=', id)], 0, 0, 0, req.session.context)
data_acts = node_act.read(search_acts, invisible_node_fields + visible_node_fields, req.session.context)
for act in data_acts:
n = nodes.get(str(act['id']))
if not n:
n = isolate_nodes.get(act['id'], {})
y_max += 140
n.update(x=20, y=y_max)
nodes[act['id']] = n
n.update(
id=act['id'],
color='white',
options={}
)
for color, expr in bgcolors.items():
if eval(expr, act):
n['color'] = color
for shape, expr in shapes.items():
if eval(expr, act):
n['shape'] = shape
for i, fld in enumerate(visible_node_fields):
n['options'][node_fields_string[i]] = act[fld]
_id, name = req.session.model(model).name_get([id], req.session.context)[0]
return dict(nodes=nodes,
conn=connectors,
name=name,
parent_field=graphs['node_parent_field'])
| agpl-3.0 | -5,902,369,261,231,614,000 | 38.281818 | 113 | 0.51863 | false |
mkennedy04/knodj | env/Lib/site-packages/django/utils/crypto.py | 82 | 6844 | """
Django's standard crypto functions and utilities.
"""
from __future__ import unicode_literals
import binascii
import hashlib
import hmac
import random
import struct
import time
from django.conf import settings
from django.utils import six
from django.utils.encoding import force_bytes
from django.utils.six.moves import range
try:
random = random.SystemRandom()
using_sysrandom = True
except NotImplementedError:
import warnings
warnings.warn('A secure pseudo-random number generator is not available '
'on your system. Falling back to Mersenne Twister.')
using_sysrandom = False
def salted_hmac(key_salt, value, secret=None):
"""
Returns the HMAC-SHA1 of 'value', using a key generated from key_salt and a
secret (which defaults to settings.SECRET_KEY).
A different key_salt should be passed in for every application of HMAC.
"""
if secret is None:
secret = settings.SECRET_KEY
key_salt = force_bytes(key_salt)
secret = force_bytes(secret)
# We need to generate a derived key from our base key. We can do this by
# passing the key_salt and our base key through a pseudo-random function and
# SHA1 works nicely.
key = hashlib.sha1(key_salt + secret).digest()
# If len(key_salt + secret) > sha_constructor().block_size, the above
# line is redundant and could be replaced by key = key_salt + secret, since
# the hmac module does the same thing for keys longer than the block size.
# However, we need to ensure that we *always* do this.
return hmac.new(key, msg=force_bytes(value), digestmod=hashlib.sha1)
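# Illustrative sketch (not part of the original module, never called by
# Django): deriving two unrelated MACs from the same secret by varying only
# the key salt, as described above. The salts, value and secret are made-up
# examples; passing ``secret`` explicitly avoids needing configured settings.
def _example_salted_hmac():
    mac_a = salted_hmac('example.app.token', 'some value', secret='example-secret')
    mac_b = salted_hmac('example.app.reset', 'some value', secret='example-secret')
    return mac_a.hexdigest() != mac_b.hexdigest()  # True: unrelated derived keys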
def get_random_string(length=12,
allowed_chars='abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
"""
Returns a securely generated random string.
The default length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value. log_2((26+26+10)^12) =~ 71 bits
"""
if not using_sysrandom:
# This is ugly, and a hack, but it makes things better than
# the alternative of predictability. This re-seeds the PRNG
# using a value that is hard for an attacker to predict, every
# time a random string is required. This may change the
# properties of the chosen random sequence slightly, but this
# is better than absolute predictability.
random.seed(
hashlib.sha256(
("%s%s%s" % (
random.getstate(),
time.time(),
settings.SECRET_KEY)).encode('utf-8')
).digest())
return ''.join(random.choice(allowed_chars) for i in range(length))
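# Illustrative sketch (not part of the original module): recomputing the
# entropy figure quoted in the docstring above for the default alphabet.
def _example_default_entropy():
    import math
    return 12 * math.log(62, 2)  # ~71.45 bits for 12 characters over 62 symbols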
if hasattr(hmac, "compare_digest"):
# Prefer the stdlib implementation, when available.
def constant_time_compare(val1, val2):
return hmac.compare_digest(force_bytes(val1), force_bytes(val2))
else:
def constant_time_compare(val1, val2):
"""
Returns True if the two strings are equal, False otherwise.
The time taken is independent of the number of characters that match.
For the sake of simplicity, this function executes in constant time only
when the two strings have the same length. It short-circuits when they
have different lengths. Since Django only uses it to compare hashes of
known expected length, this is acceptable.
"""
if len(val1) != len(val2):
return False
result = 0
if six.PY3 and isinstance(val1, bytes) and isinstance(val2, bytes):
for x, y in zip(val1, val2):
result |= x ^ y
else:
for x, y in zip(val1, val2):
result |= ord(x) ^ ord(y)
return result == 0
def _bin_to_long(x):
"""
Convert a binary string into a long integer
This is a clever optimization for fast xor vector math
"""
return int(binascii.hexlify(x), 16)
def _long_to_bin(x, hex_format_string):
"""
Convert a long integer into a binary string.
hex_format_string is like "%020x" for padding 10 characters.
"""
return binascii.unhexlify((hex_format_string % x).encode('ascii'))
if hasattr(hashlib, "pbkdf2_hmac"):
def pbkdf2(password, salt, iterations, dklen=0, digest=None):
"""
Implements PBKDF2 with the same API as Django's existing
implementation, using the stdlib.
This is used in Python 2.7.8+ and 3.4+.
"""
if digest is None:
digest = hashlib.sha256
if not dklen:
dklen = None
password = force_bytes(password)
salt = force_bytes(salt)
return hashlib.pbkdf2_hmac(
digest().name, password, salt, iterations, dklen)
else:
def pbkdf2(password, salt, iterations, dklen=0, digest=None):
"""
Implements PBKDF2 as defined in RFC 2898, section 5.2
HMAC+SHA256 is used as the default pseudo random function.
As of 2014, 100,000 iterations was the recommended default which took
        100ms on a 2.7GHz Intel i7 with an optimized implementation. This is
probably the bare minimum for security given 1000 iterations was
recommended in 2001. This code is very well optimized for CPython and
is about five times slower than OpenSSL's implementation. Look in
django.contrib.auth.hashers for the present default, it is lower than
the recommended 100,000 because of the performance difference between
this and an optimized implementation.
"""
assert iterations > 0
if not digest:
digest = hashlib.sha256
password = force_bytes(password)
salt = force_bytes(salt)
hlen = digest().digest_size
if not dklen:
dklen = hlen
if dklen > (2 ** 32 - 1) * hlen:
raise OverflowError('dklen too big')
l = -(-dklen // hlen)
r = dklen - (l - 1) * hlen
hex_format_string = "%%0%ix" % (hlen * 2)
inner, outer = digest(), digest()
if len(password) > inner.block_size:
password = digest(password).digest()
password += b'\x00' * (inner.block_size - len(password))
inner.update(password.translate(hmac.trans_36))
outer.update(password.translate(hmac.trans_5C))
def F(i):
u = salt + struct.pack(b'>I', i)
result = 0
for j in range(int(iterations)):
dig1, dig2 = inner.copy(), outer.copy()
dig1.update(u)
dig2.update(dig1.digest())
u = dig2.digest()
result ^= _bin_to_long(u)
return _long_to_bin(result, hex_format_string)
T = [F(x) for x in range(1, l)]
return b''.join(T) + F(l)[:r]
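# Illustrative sketch (not part of the original module, never called by
# Django): checking whichever pbkdf2() was selected above against the first
# HMAC-SHA1 test vector from RFC 6070.
def _example_pbkdf2_known_vector():
    derived = pbkdf2(b'password', b'salt', 1, dklen=20, digest=hashlib.sha1)
    return binascii.hexlify(derived) == b'0c60c80f961f0e71f3a9b524af6012062fe037a6'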
| mit | 7,407,117,414,099,400,000 | 35.021053 | 80 | 0.621712 | false |
gunan/tensorflow | tensorflow/python/platform/sysconfig.py | 3 | 2718 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""System configuration library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as _os_path
import platform as _platform
from tensorflow.python.framework.versions import CXX11_ABI_FLAG as _CXX11_ABI_FLAG
from tensorflow.python.framework.versions import MONOLITHIC_BUILD as _MONOLITHIC_BUILD
from tensorflow.python.framework.versions import VERSION as _VERSION
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=g-import-not-at-top
@tf_export('sysconfig.get_include')
def get_include():
"""Get the directory containing the TensorFlow C++ header files.
Returns:
The directory as string.
"""
# Import inside the function.
# sysconfig is imported from the tensorflow module, so having this
# import at the top would cause a circular import, resulting in
# the tensorflow module missing symbols that come after sysconfig.
import tensorflow as tf
return _os_path.join(_os_path.dirname(tf.__file__), 'include')
@tf_export('sysconfig.get_lib')
def get_lib():
"""Get the directory containing the TensorFlow framework library.
Returns:
The directory as string.
"""
import tensorflow as tf
return _os_path.join(_os_path.dirname(tf.__file__))
@tf_export('sysconfig.get_compile_flags')
def get_compile_flags():
"""Get the compilation flags for custom operators.
Returns:
The compilation flags.
"""
flags = []
flags.append('-I%s' % get_include())
flags.append('-D_GLIBCXX_USE_CXX11_ABI=%d' % _CXX11_ABI_FLAG)
return flags
@tf_export('sysconfig.get_link_flags')
def get_link_flags():
"""Get the link flags for custom operators.
Returns:
The link flags.
"""
is_mac = _platform.system() == 'Darwin'
ver = _VERSION.split('.')[0]
flags = []
if not _MONOLITHIC_BUILD:
flags.append('-L%s' % get_lib())
if is_mac:
flags.append('-ltensorflow_framework.%s' % ver)
else:
flags.append('-l:libtensorflow_framework.so.%s' % ver)
return flags
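# Illustrative sketch (not part of the original module, never executed by
# TensorFlow): combining the two helpers above into a g++ command line for a
# custom op. The source and output file names are made-up examples.
def _example_custom_op_command():
  flags = get_compile_flags() + get_link_flags()
  return ('g++ -std=c++11 -shared example_op.cc -o example_op.so -fPIC ' +
          ' '.join(flags))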
| apache-2.0 | -1,158,888,667,916,578,600 | 30.604651 | 86 | 0.699411 | false |
1013553207/django | django/utils/text.py | 308 | 14923 | from __future__ import unicode_literals
import re
import unicodedata
from gzip import GzipFile
from io import BytesIO
from django.utils import six
from django.utils.encoding import force_text
from django.utils.functional import SimpleLazyObject, allow_lazy
from django.utils.safestring import SafeText, mark_safe
from django.utils.six.moves import html_entities
from django.utils.translation import pgettext, ugettext as _, ugettext_lazy
if six.PY2:
# Import force_unicode even though this module doesn't use it, because some
# people rely on it being here.
from django.utils.encoding import force_unicode # NOQA
# Capitalizes the first letter of a string.
capfirst = lambda x: x and force_text(x)[0].upper() + force_text(x)[1:]
capfirst = allow_lazy(capfirst, six.text_type)
# Set up regular expressions
re_words = re.compile(r'<.*?>|((?:\w[-\w]*|&.*?;)+)', re.U | re.S)
re_chars = re.compile(r'<.*?>|(.)', re.U | re.S)
re_tag = re.compile(r'<(/)?([^ ]+?)(?:(\s*/)| .*?)?>', re.S)
re_newlines = re.compile(r'\r\n|\r') # Used in normalize_newlines
re_camel_case = re.compile(r'(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))')
def wrap(text, width):
"""
A word-wrap function that preserves existing line breaks. Expects that
existing line breaks are posix newlines.
All white space is preserved except added line breaks consume the space on
which they break the line.
Long words are not wrapped, so the output text may have lines longer than
``width``.
"""
text = force_text(text)
def _generator():
for line in text.splitlines(True): # True keeps trailing linebreaks
max_width = min((line.endswith('\n') and width + 1 or width), width)
while len(line) > max_width:
space = line[:max_width + 1].rfind(' ') + 1
if space == 0:
space = line.find(' ') + 1
if space == 0:
yield line
line = ''
break
yield '%s\n' % line[:space - 1]
line = line[space:]
max_width = min((line.endswith('\n') and width + 1 or width), width)
if line:
yield line
return ''.join(_generator())
wrap = allow_lazy(wrap, six.text_type)
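# Illustrative sketch (not part of the original module, never called by
# Django): the line-breaking behaviour described in the docstring above.
def _example_wrap():
    return wrap('short lines are easy', 10)  # -> 'short\nlines are\neasy'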
class Truncator(SimpleLazyObject):
"""
An object used to truncate text, either by characters or words.
"""
def __init__(self, text):
super(Truncator, self).__init__(lambda: force_text(text))
def add_truncation_text(self, text, truncate=None):
if truncate is None:
truncate = pgettext(
'String to return when truncating text',
'%(truncated_text)s...')
truncate = force_text(truncate)
if '%(truncated_text)s' in truncate:
return truncate % {'truncated_text': text}
# The truncation text didn't contain the %(truncated_text)s string
# replacement argument so just append it to the text.
if text.endswith(truncate):
# But don't append the truncation text if the current text already
# ends in this.
return text
return '%s%s' % (text, truncate)
def chars(self, num, truncate=None, html=False):
"""
Returns the text truncated to be no longer than the specified number
of characters.
Takes an optional argument of what should be used to notify that the
string has been truncated, defaulting to a translatable string of an
ellipsis (...).
"""
length = int(num)
text = unicodedata.normalize('NFC', self._wrapped)
# Calculate the length to truncate to (max length - end_text length)
truncate_len = length
for char in self.add_truncation_text('', truncate):
if not unicodedata.combining(char):
truncate_len -= 1
if truncate_len == 0:
break
if html:
return self._truncate_html(length, truncate, text, truncate_len, False)
return self._text_chars(length, truncate, text, truncate_len)
chars = allow_lazy(chars)
def _text_chars(self, length, truncate, text, truncate_len):
"""
Truncates a string after a certain number of chars.
"""
s_len = 0
end_index = None
for i, char in enumerate(text):
if unicodedata.combining(char):
# Don't consider combining characters
# as adding to the string length
continue
s_len += 1
if end_index is None and s_len > truncate_len:
end_index = i
if s_len > length:
# Return the truncated string
return self.add_truncation_text(text[:end_index or 0],
truncate)
# Return the original string since no truncation was necessary
return text
def words(self, num, truncate=None, html=False):
"""
Truncates a string after a certain number of words. Takes an optional
argument of what should be used to notify that the string has been
truncated, defaulting to ellipsis (...).
"""
length = int(num)
if html:
return self._truncate_html(length, truncate, self._wrapped, length, True)
return self._text_words(length, truncate)
words = allow_lazy(words)
def _text_words(self, length, truncate):
"""
Truncates a string after a certain number of words.
Newlines in the string will be stripped.
"""
words = self._wrapped.split()
if len(words) > length:
words = words[:length]
return self.add_truncation_text(' '.join(words), truncate)
return ' '.join(words)
def _truncate_html(self, length, truncate, text, truncate_len, words):
"""
Truncates HTML to a certain number of chars (not counting tags and
comments), or, if words is True, then to a certain number of words.
Closes opened tags if they were correctly closed in the given HTML.
Newlines in the HTML are preserved.
"""
if words and length <= 0:
return ''
html4_singlets = (
'br', 'col', 'link', 'base', 'img',
'param', 'area', 'hr', 'input'
)
# Count non-HTML chars/words and keep note of open tags
pos = 0
end_text_pos = 0
current_len = 0
open_tags = []
regex = re_words if words else re_chars
while current_len <= length:
m = regex.search(text, pos)
if not m:
# Checked through whole string
break
pos = m.end(0)
if m.group(1):
# It's an actual non-HTML word or char
current_len += 1
if current_len == truncate_len:
end_text_pos = pos
continue
# Check for tag
tag = re_tag.match(m.group(0))
if not tag or current_len >= truncate_len:
# Don't worry about non tags or tags after our truncate point
continue
closing_tag, tagname, self_closing = tag.groups()
# Element names are always case-insensitive
tagname = tagname.lower()
if self_closing or tagname in html4_singlets:
pass
elif closing_tag:
# Check for match in open tags list
try:
i = open_tags.index(tagname)
except ValueError:
pass
else:
# SGML: An end tag closes, back to the matching start tag,
# all unclosed intervening start tags with omitted end tags
open_tags = open_tags[i + 1:]
else:
# Add it to the start of the open tags list
open_tags.insert(0, tagname)
if current_len <= length:
return text
out = text[:end_text_pos]
truncate_text = self.add_truncation_text('', truncate)
if truncate_text:
out += truncate_text
# Close any tags still open
for tag in open_tags:
out += '</%s>' % tag
# Return string
return out
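# Illustrative example (added; not part of the original module): character- and
# word-based truncation with the Truncator defined above. The sample text is
# arbitrary.
def _truncator_example():
    truncator = Truncator('The quick brown fox jumped over the lazy dog.')
    by_chars = truncator.chars(12)  # 'The quick...'
    by_words = truncator.words(3)   # 'The quick brown...'
    return by_chars, by_words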
def get_valid_filename(s):
"""
Returns the given string converted to a string that can be used for a clean
filename. Specifically, leading and trailing spaces are removed; other
spaces are converted to underscores; and anything that is not a unicode
alphanumeric, dash, underscore, or dot, is removed.
>>> get_valid_filename("john's portrait in 2004.jpg")
'johns_portrait_in_2004.jpg'
"""
s = force_text(s).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
get_valid_filename = allow_lazy(get_valid_filename, six.text_type)
def get_text_list(list_, last_word=ugettext_lazy('or')):
"""
>>> get_text_list(['a', 'b', 'c', 'd'])
'a, b, c or d'
>>> get_text_list(['a', 'b', 'c'], 'and')
'a, b and c'
>>> get_text_list(['a', 'b'], 'and')
'a and b'
>>> get_text_list(['a'])
'a'
>>> get_text_list([])
''
"""
if len(list_) == 0:
return ''
if len(list_) == 1:
return force_text(list_[0])
return '%s %s %s' % (
# Translators: This string is used as a separator between list elements
_(', ').join(force_text(i) for i in list_[:-1]),
force_text(last_word), force_text(list_[-1]))
get_text_list = allow_lazy(get_text_list, six.text_type)
def normalize_newlines(text):
"""Normalizes CRLF and CR newlines to just LF."""
text = force_text(text)
return re_newlines.sub('\n', text)
normalize_newlines = allow_lazy(normalize_newlines, six.text_type)
def phone2numeric(phone):
"""Converts a phone number with letters into its numeric equivalent."""
char2number = {'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3', 'f': '3',
'g': '4', 'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5', 'm': '6',
'n': '6', 'o': '6', 'p': '7', 'q': '7', 'r': '7', 's': '7', 't': '8',
'u': '8', 'v': '8', 'w': '9', 'x': '9', 'y': '9', 'z': '9'}
return ''.join(char2number.get(c, c) for c in phone.lower())
phone2numeric = allow_lazy(phone2numeric)
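# Illustrative example (added; not part of the original module): the vanity
# number below is arbitrary.
def _phone2numeric_example():
    return phone2numeric('1-800-FLOWERS')  # '1-800-3569377'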
# From http://www.xhaus.com/alan/python/httpcomp.html#gzip
# Used with permission.
def compress_string(s):
zbuf = BytesIO()
zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
zfile.write(s)
zfile.close()
return zbuf.getvalue()
class StreamingBuffer(object):
def __init__(self):
self.vals = []
def write(self, val):
self.vals.append(val)
def read(self):
if not self.vals:
return b''
ret = b''.join(self.vals)
self.vals = []
return ret
def flush(self):
return
def close(self):
return
# Like compress_string, but for iterators of strings.
def compress_sequence(sequence):
buf = StreamingBuffer()
zfile = GzipFile(mode='wb', compresslevel=6, fileobj=buf)
# Output headers...
yield buf.read()
for item in sequence:
zfile.write(item)
data = buf.read()
if data:
yield data
zfile.close()
yield buf.read()
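# Illustrative example (added; not part of the original module): gzip-compress
# a streamed body chunk by chunk; the chunks are arbitrary. Joining the pieces
# yields one valid gzip stream.
def _compress_sequence_example():
    chunks = [b'Hello, ', b'world', b'!']
    return b''.join(compress_sequence(chunks))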
# Expression to match some_token and some_token="with spaces" (and similarly
# for single-quoted strings).
smart_split_re = re.compile(r"""
((?:
[^\s'"]*
(?:
(?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
[^\s'"]*
)+
) | \S+)
""", re.VERBOSE)
def smart_split(text):
r"""
Generator that splits a string by spaces, leaving quoted phrases together.
Supports both single and double quotes, and supports escaping quotes with
backslashes. In the output, strings will keep their initial and trailing
quote marks and escaped quotes will remain escaped (the results can then
be further processed with unescape_string_literal()).
>>> list(smart_split(r'This is "a person\'s" test.'))
['This', 'is', '"a person\\\'s"', 'test.']
>>> list(smart_split(r"Another 'person\'s' test."))
['Another', "'person\\'s'", 'test.']
>>> list(smart_split(r'A "\"funky\" style" test.'))
['A', '"\\"funky\\" style"', 'test.']
"""
text = force_text(text)
for bit in smart_split_re.finditer(text):
yield bit.group(0)
def _replace_entity(match):
text = match.group(1)
if text[0] == '#':
text = text[1:]
try:
if text[0] in 'xX':
c = int(text[1:], 16)
else:
c = int(text)
return six.unichr(c)
except ValueError:
return match.group(0)
else:
try:
return six.unichr(html_entities.name2codepoint[text])
except (ValueError, KeyError):
return match.group(0)
_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape_entities(text):
return _entity_re.sub(_replace_entity, text)
unescape_entities = allow_lazy(unescape_entities, six.text_type)
def unescape_string_literal(s):
r"""
Convert quoted string literals to unquoted strings with escaped quotes and
backslashes unquoted::
>>> unescape_string_literal('"abc"')
'abc'
>>> unescape_string_literal("'abc'")
'abc'
>>> unescape_string_literal('"a \"bc\""')
'a "bc"'
>>> unescape_string_literal("'\'ab\' c'")
"'ab' c"
"""
if s[0] not in "\"'" or s[-1] != s[0]:
raise ValueError("Not a string literal: %r" % s)
quote = s[0]
return s[1:-1].replace(r'\%s' % quote, quote).replace(r'\\', '\\')
unescape_string_literal = allow_lazy(unescape_string_literal)
def slugify(value, allow_unicode=False):
"""
Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
Remove characters that aren't alphanumerics, underscores, or hyphens.
Convert to lowercase. Also strip leading and trailing whitespace.
"""
value = force_text(value)
if allow_unicode:
value = unicodedata.normalize('NFKC', value)
value = re.sub('[^\w\s-]', '', value, flags=re.U).strip().lower()
return mark_safe(re.sub('[-\s]+', '-', value, flags=re.U))
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub('[^\w\s-]', '', value).strip().lower()
return mark_safe(re.sub('[-\s]+', '-', value))
slugify = allow_lazy(slugify, six.text_type, SafeText)
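# Illustrative example (added; not part of the original module): the sample
# string is arbitrary.
def _slugify_example():
    # With allow_unicode=True, word characters outside ASCII are kept as-is.
    return slugify('Hello, World! 2015')  # 'hello-world-2015'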
def camel_case_to_spaces(value):
"""
Splits CamelCase and converts to lower case. Also strips leading and
trailing whitespace.
"""
return re_camel_case.sub(r' \1', value).strip().lower()
| bsd-3-clause | 1,815,738,182,112,071,200 | 33.305747 | 90 | 0.562957 | false |
forge33/CouchPotatoServer | libs/subliminal/cache.py | 107 | 4619 | # -*- coding: utf-8 -*-
# Copyright 2012 Nicolas Wack <[email protected]>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
from collections import defaultdict
from functools import wraps
import logging
import os.path
import threading
try:
import cPickle as pickle
except ImportError:
import pickle
__all__ = ['Cache', 'cachedmethod']
logger = logging.getLogger(__name__)
class Cache(object):
"""A Cache object contains cached values for methods. It can have
separate internal caches, one for each service
"""
def __init__(self, cache_dir):
self.cache_dir = cache_dir
self.cache = defaultdict(dict)
self.lock = threading.RLock()
def __del__(self):
for service_name in self.cache:
self.save(service_name)
def cache_location(self, service_name):
return os.path.join(self.cache_dir, 'subliminal_%s.cache' % service_name)
def load(self, service_name):
with self.lock:
if service_name in self.cache:
# already loaded
return
self.cache[service_name] = defaultdict(dict)
filename = self.cache_location(service_name)
logger.debug(u'Cache: loading cache from %s' % filename)
try:
self.cache[service_name] = pickle.load(open(filename, 'rb'))
except IOError:
logger.info('Cache: Cache file "%s" doesn\'t exist, creating it' % filename)
except EOFError:
logger.error('Cache: cache file "%s" is corrupted... Removing it.' % filename)
os.remove(filename)
def save(self, service_name):
filename = self.cache_location(service_name)
logger.debug(u'Cache: saving cache to %s' % filename)
with self.lock:
pickle.dump(self.cache[service_name], open(filename, 'wb'))
def clear(self, service_name):
try:
os.remove(self.cache_location(service_name))
except OSError:
pass
self.cache[service_name] = defaultdict(dict)
def cached_func_key(self, func, cls=None):
try:
cls = func.im_class
except:
pass
return ('%s.%s' % (cls.__module__, cls.__name__), func.__name__)
def function_cache(self, service_name, func):
func_key = self.cached_func_key(func)
return self.cache[service_name][func_key]
def cache_for(self, service_name, func, args, result):
# no need to lock here, dict ops are atomic
self.function_cache(service_name, func)[args] = result
def cached_value(self, service_name, func, args):
"""Raises KeyError if not found"""
# no need to lock here, dict ops are atomic
return self.function_cache(service_name, func)[args]
def cachedmethod(function):
"""Decorator to make a method use the cache.
.. note::
This can NOT be used with static functions, it has to be used on
methods of some class
"""
@wraps(function)
def cached(*args):
c = args[0].config.cache
service_name = args[0].__class__.__name__
func_key = c.cached_func_key(function, cls=args[0].__class__)
func_cache = c.cache[service_name][func_key]
# we need to remove the first element of args for the key, as it is the
# instance pointer and we don't want the cache to know which instance
# called it, it is shared among all instances of the same class
key = args[1:]
if key in func_cache:
result = func_cache[key]
logger.debug(u'Using cached value for %s(%s), returns: %s' % (func_key, key, result))
return result
result = function(*args)
# note: another thread could have already cached a value in the
# meantime, but that's ok as we prefer to keep the latest value in
# the cache
func_cache[key] = result
return result
return cached
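# Illustrative sketch (added; not part of the original module). The class below
# is hypothetical -- real subliminal services have richer interfaces -- and only
# shows how @cachedmethod pairs with the Cache object above: config.cache is
# expected to be a Cache instance.
class _ExampleService(object):
    def __init__(self, config):
        self.config = config
    @cachedmethod
    def expensive_lookup(self, key):
        # Stands in for a slow remote call; the result is cached per (key,).
        return key.upper()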
| gpl-3.0 | -7,303,303,058,693,914,000 | 33.470149 | 97 | 0.627625 | false |
maxamillion/ansible | lib/ansible/module_utils/compat/_selectors2.py | 79 | 23517 | # This file is from the selectors2.py package. It backports the PSF Licensed
# selectors module from the Python-3.5 stdlib to older versions of Python.
# The author, Seth Michael Larson, dual licenses his modifications under the
# PSF License and MIT License:
# https://github.com/SethMichaelLarson/selectors2#license
#
# Copyright (c) 2016 Seth Michael Larson
#
# PSF License (see licenses/PSF-license.txt or https://opensource.org/licenses/Python-2.0)
# MIT License (see licenses/MIT-license.txt or https://opensource.org/licenses/MIT)
#
# Backport of selectors.py from Python 3.5+ to support Python < 3.4
# Also has the behavior specified in PEP 475 which is to retry syscalls
# in the case of an EINTR error. This module is required because selectors34
# does not follow this behavior and instead returns that no file descriptor
# events have occurred rather than retry the syscall. The decision to drop
# support for select.devpoll is made to maintain 100% test coverage.
import errno
import math
import select
import socket
import sys
import time
from collections import namedtuple
from ansible.module_utils.common._collections_compat import Mapping
try:
monotonic = time.monotonic
except (AttributeError, ImportError): # Python 3.3<
monotonic = time.time
__author__ = 'Seth Michael Larson'
__email__ = '[email protected]'
__version__ = '1.1.1'
__license__ = 'MIT'
__all__ = [
'EVENT_READ',
'EVENT_WRITE',
'SelectorError',
'SelectorKey',
'DefaultSelector'
]
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)
HAS_SELECT = True # Variable that shows whether the platform has a selector.
_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
class SelectorError(Exception):
def __init__(self, errcode):
super(SelectorError, self).__init__()
self.errno = errcode
def __repr__(self):
return "<SelectorError errno={0}>".format(self.errno)
def __str__(self):
return self.__repr__()
def _fileobj_to_fd(fileobj):
""" Return a file descriptor from a file object. If
given an integer will simply return that integer back. """
if isinstance(fileobj, int):
fd = fileobj
else:
try:
fd = int(fileobj.fileno())
except (AttributeError, TypeError, ValueError):
raise ValueError("Invalid file object: {0!r}".format(fileobj))
if fd < 0:
raise ValueError("Invalid file descriptor: {0}".format(fd))
return fd
# Python 3.5 uses a more direct route to wrap system calls to increase speed.
if sys.version_info >= (3, 5):
def _syscall_wrapper(func, _, *args, **kwargs):
""" This is the short-circuit version of the below logic
because in Python 3.5+ all selectors restart system calls. """
try:
return func(*args, **kwargs)
except (OSError, IOError, select.error) as e:
errcode = None
if hasattr(e, "errno"):
errcode = e.errno
elif hasattr(e, "args"):
errcode = e.args[0]
raise SelectorError(errcode)
else:
def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
""" Wrapper function for syscalls that could fail due to EINTR.
All functions should be retried if there is time left in the timeout
in accordance with PEP 475. """
timeout = kwargs.get("timeout", None)
if timeout is None:
expires = None
recalc_timeout = False
else:
timeout = float(timeout)
if timeout < 0.0: # Timeout less than 0 treated as no timeout.
expires = None
else:
expires = monotonic() + timeout
args = list(args)
if recalc_timeout and "timeout" not in kwargs:
raise ValueError(
"Timeout must be in args or kwargs to be recalculated")
result = _SYSCALL_SENTINEL
while result is _SYSCALL_SENTINEL:
try:
result = func(*args, **kwargs)
# OSError is thrown by select.select
# IOError is thrown by select.epoll.poll
# select.error is thrown by select.poll.poll
# Aren't we thankful for Python 3.x rework for exceptions?
except (OSError, IOError, select.error) as e:
# select.error wasn't a subclass of OSError in the past.
errcode = None
if hasattr(e, "errno"):
errcode = e.errno
elif hasattr(e, "args"):
errcode = e.args[0]
# Also test for the Windows equivalent of EINTR.
is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
errcode == errno.WSAEINTR))
if is_interrupt:
if expires is not None:
current_time = monotonic()
if current_time > expires:
raise OSError(errno.ETIMEDOUT)
if recalc_timeout:
if "timeout" in kwargs:
kwargs["timeout"] = expires - current_time
continue
if errcode:
raise SelectorError(errcode)
else:
raise
return result
SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
class _SelectorMapping(Mapping):
""" Mapping of file objects to selector keys """
def __init__(self, selector):
self._selector = selector
def __len__(self):
return len(self._selector._fd_to_key)
def __getitem__(self, fileobj):
try:
fd = self._selector._fileobj_lookup(fileobj)
return self._selector._fd_to_key[fd]
except KeyError:
raise KeyError("{0!r} is not registered.".format(fileobj))
def __iter__(self):
return iter(self._selector._fd_to_key)
class BaseSelector(object):
""" Abstract Selector class
A selector supports registering file objects to be monitored
for specific I/O events.
A file object is a file descriptor or any object with a
`fileno()` method. An arbitrary object can be attached to the
file object which can be used for example to store context info,
a callback, etc.
A selector can use various implementations (select(), poll(), epoll(),
and kqueue()) depending on the platform. The 'DefaultSelector' class uses
the most efficient implementation for the current platform.
"""
def __init__(self):
# Maps file descriptors to keys.
self._fd_to_key = {}
# Read-only mapping returned by get_map()
self._map = _SelectorMapping(self)
def _fileobj_lookup(self, fileobj):
""" Return a file descriptor from a file object.
This wraps _fileobj_to_fd() to do an exhaustive
search in case the object is invalid but we still
have it in our map. Used by unregister() so we can
unregister an object that was previously registered
even if it is closed. It is also used by _SelectorMapping
"""
try:
return _fileobj_to_fd(fileobj)
except ValueError:
# Search through all our mapped keys.
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
return key.fd
# Raise ValueError after all.
raise
def register(self, fileobj, events, data=None):
""" Register a file object for a set of events to monitor. """
if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
raise ValueError("Invalid events: {0!r}".format(events))
key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)
if key.fd in self._fd_to_key:
raise KeyError("{0!r} (FD {1}) is already registered"
.format(fileobj, key.fd))
self._fd_to_key[key.fd] = key
return key
def unregister(self, fileobj):
""" Unregister a file object from being monitored. """
try:
key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
# Getting the fileno of a closed socket on Windows errors with EBADF.
except socket.error as err:
if err.errno != errno.EBADF:
raise
else:
for key in self._fd_to_key.values():
if key.fileobj is fileobj:
self._fd_to_key.pop(key.fd)
break
else:
raise KeyError("{0!r} is not registered".format(fileobj))
return key
def modify(self, fileobj, events, data=None):
""" Change a registered file object monitored events and data. """
# NOTE: Some subclasses optimize this operation even further.
try:
key = self._fd_to_key[self._fileobj_lookup(fileobj)]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
if events != key.events:
self.unregister(fileobj)
key = self.register(fileobj, events, data)
elif data != key.data:
# Use a shortcut to update the data.
key = key._replace(data=data)
self._fd_to_key[key.fd] = key
return key
def select(self, timeout=None):
""" Perform the actual selection until some monitored file objects
are ready or the timeout expires. """
raise NotImplementedError()
def close(self):
""" Close the selector. This must be called to ensure that all
underlying resources are freed. """
self._fd_to_key.clear()
self._map = None
def get_key(self, fileobj):
""" Return the key associated with a registered file object. """
mapping = self.get_map()
if mapping is None:
raise RuntimeError("Selector is closed")
try:
return mapping[fileobj]
except KeyError:
raise KeyError("{0!r} is not registered".format(fileobj))
def get_map(self):
""" Return a mapping of file objects to selector keys """
return self._map
def _key_from_fd(self, fd):
""" Return the key associated to a given file descriptor
Return None if it is not found. """
try:
return self._fd_to_key[fd]
except KeyError:
return None
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
# Almost all platforms have select.select()
if hasattr(select, "select"):
class SelectSelector(BaseSelector):
""" Select-based selector. """
def __init__(self):
super(SelectSelector, self).__init__()
self._readers = set()
self._writers = set()
def register(self, fileobj, events, data=None):
key = super(SelectSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
self._readers.add(key.fd)
if events & EVENT_WRITE:
self._writers.add(key.fd)
return key
def unregister(self, fileobj):
key = super(SelectSelector, self).unregister(fileobj)
self._readers.discard(key.fd)
self._writers.discard(key.fd)
return key
def _select(self, r, w, timeout=None):
""" Wrapper for select.select because timeout is a positional arg """
return select.select(r, w, [], timeout)
def select(self, timeout=None):
# Selecting on empty lists on Windows errors out.
if not len(self._readers) and not len(self._writers):
return []
timeout = None if timeout is None else max(timeout, 0.0)
ready = []
r, w, _ = _syscall_wrapper(self._select, True, self._readers,
self._writers, timeout=timeout)
r = set(r)
w = set(w)
for fd in r | w:
events = 0
if fd in r:
events |= EVENT_READ
if fd in w:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
__all__.append('SelectSelector')
if hasattr(select, "poll"):
class PollSelector(BaseSelector):
""" Poll-based selector """
def __init__(self):
super(PollSelector, self).__init__()
self._poll = select.poll()
def register(self, fileobj, events, data=None):
key = super(PollSelector, self).register(fileobj, events, data)
event_mask = 0
if events & EVENT_READ:
event_mask |= select.POLLIN
if events & EVENT_WRITE:
event_mask |= select.POLLOUT
self._poll.register(key.fd, event_mask)
return key
def unregister(self, fileobj):
key = super(PollSelector, self).unregister(fileobj)
self._poll.unregister(key.fd)
return key
def _wrap_poll(self, timeout=None):
""" Wrapper function for select.poll.poll() so that
_syscall_wrapper can work with only seconds. """
if timeout is not None:
if timeout <= 0:
timeout = 0
else:
# select.poll.poll() has a resolution of 1 millisecond,
# round away from zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3)
result = self._poll.poll(timeout)
return result
def select(self, timeout=None):
ready = []
fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.POLLIN:
events |= EVENT_WRITE
if event_mask & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
__all__.append('PollSelector')
if hasattr(select, "epoll"):
class EpollSelector(BaseSelector):
""" Epoll-based selector """
def __init__(self):
super(EpollSelector, self).__init__()
self._epoll = select.epoll()
def fileno(self):
return self._epoll.fileno()
def register(self, fileobj, events, data=None):
key = super(EpollSelector, self).register(fileobj, events, data)
events_mask = 0
if events & EVENT_READ:
events_mask |= select.EPOLLIN
if events & EVENT_WRITE:
events_mask |= select.EPOLLOUT
_syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
return key
def unregister(self, fileobj):
key = super(EpollSelector, self).unregister(fileobj)
try:
_syscall_wrapper(self._epoll.unregister, False, key.fd)
except SelectorError:
# This can occur when the fd was closed since registry.
pass
return key
def select(self, timeout=None):
if timeout is not None:
if timeout <= 0:
timeout = 0.0
else:
# select.epoll.poll() has a resolution of 1 millisecond
# but luckily takes seconds so we don't need a wrapper
# like PollSelector. Just for better rounding.
timeout = math.ceil(timeout * 1e3) * 1e-3
timeout = float(timeout)
else:
timeout = -1.0 # epoll.poll() must have a float.
# We always want at least 1 to ensure that select can be called
# with no file descriptors registered. Otherwise will fail.
max_events = max(len(self._fd_to_key), 1)
ready = []
fd_events = _syscall_wrapper(self._epoll.poll, True,
timeout=timeout,
maxevents=max_events)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.EPOLLIN:
events |= EVENT_WRITE
if event_mask & ~select.EPOLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._epoll.close()
super(EpollSelector, self).close()
__all__.append('EpollSelector')
if hasattr(select, "devpoll"):
class DevpollSelector(BaseSelector):
"""Solaris /dev/poll selector."""
def __init__(self):
super(DevpollSelector, self).__init__()
self._devpoll = select.devpoll()
def fileno(self):
return self._devpoll.fileno()
def register(self, fileobj, events, data=None):
key = super(DevpollSelector, self).register(fileobj, events, data)
poll_events = 0
if events & EVENT_READ:
poll_events |= select.POLLIN
if events & EVENT_WRITE:
poll_events |= select.POLLOUT
self._devpoll.register(key.fd, poll_events)
return key
def unregister(self, fileobj):
key = super(DevpollSelector, self).unregister(fileobj)
self._devpoll.unregister(key.fd)
return key
def _wrap_poll(self, timeout=None):
""" Wrapper function for select.poll.poll() so that
_syscall_wrapper can work with only seconds. """
if timeout is not None:
if timeout <= 0:
timeout = 0
else:
# select.devpoll.poll() has a resolution of 1 millisecond,
# round away from zero to wait *at least* timeout seconds.
timeout = math.ceil(timeout * 1e3)
result = self._devpoll.poll(timeout)
return result
def select(self, timeout=None):
ready = []
fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
for fd, event_mask in fd_events:
events = 0
if event_mask & ~select.POLLIN:
events |= EVENT_WRITE
if event_mask & ~select.POLLOUT:
events |= EVENT_READ
key = self._key_from_fd(fd)
if key:
ready.append((key, events & key.events))
return ready
def close(self):
self._devpoll.close()
super(DevpollSelector, self).close()
__all__.append('DevpollSelector')
if hasattr(select, "kqueue"):
class KqueueSelector(BaseSelector):
""" Kqueue / Kevent-based selector """
def __init__(self):
super(KqueueSelector, self).__init__()
self._kqueue = select.kqueue()
def fileno(self):
return self._kqueue.fileno()
def register(self, fileobj, events, data=None):
key = super(KqueueSelector, self).register(fileobj, events, data)
if events & EVENT_READ:
kevent = select.kevent(key.fd,
select.KQ_FILTER_READ,
select.KQ_EV_ADD)
_syscall_wrapper(self._wrap_control, False, [kevent], 0, 0)
if events & EVENT_WRITE:
kevent = select.kevent(key.fd,
select.KQ_FILTER_WRITE,
select.KQ_EV_ADD)
_syscall_wrapper(self._wrap_control, False, [kevent], 0, 0)
return key
def unregister(self, fileobj):
key = super(KqueueSelector, self).unregister(fileobj)
if key.events & EVENT_READ:
kevent = select.kevent(key.fd,
select.KQ_FILTER_READ,
select.KQ_EV_DELETE)
try:
_syscall_wrapper(self._wrap_control, False, [kevent], 0, 0)
except SelectorError:
pass
if key.events & EVENT_WRITE:
kevent = select.kevent(key.fd,
select.KQ_FILTER_WRITE,
select.KQ_EV_DELETE)
try:
_syscall_wrapper(self._wrap_control, False, [kevent], 0, 0)
except SelectorError:
pass
return key
def select(self, timeout=None):
if timeout is not None:
timeout = max(timeout, 0)
max_events = len(self._fd_to_key) * 2
ready_fds = {}
kevent_list = _syscall_wrapper(self._wrap_control, True,
None, max_events, timeout=timeout)
for kevent in kevent_list:
fd = kevent.ident
event_mask = kevent.filter
events = 0
if event_mask == select.KQ_FILTER_READ:
events |= EVENT_READ
if event_mask == select.KQ_FILTER_WRITE:
events |= EVENT_WRITE
key = self._key_from_fd(fd)
if key:
if key.fd not in ready_fds:
ready_fds[key.fd] = (key, events & key.events)
else:
old_events = ready_fds[key.fd][1]
ready_fds[key.fd] = (key, (events | old_events) & key.events)
return list(ready_fds.values())
def close(self):
self._kqueue.close()
super(KqueueSelector, self).close()
def _wrap_control(self, changelist, max_events, timeout):
return self._kqueue.control(changelist, max_events, timeout)
__all__.append('KqueueSelector')
# Choose the best implementation, roughly:
# kqueue == epoll == devpoll > poll > select.
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
if 'KqueueSelector' in globals(): # Platform-specific: Mac OS and BSD
DefaultSelector = KqueueSelector
elif 'DevpollSelector' in globals():
DefaultSelector = DevpollSelector
elif 'EpollSelector' in globals(): # Platform-specific: Linux
DefaultSelector = EpollSelector
elif 'PollSelector' in globals(): # Platform-specific: Linux
DefaultSelector = PollSelector
elif 'SelectSelector' in globals(): # Platform-specific: Windows
DefaultSelector = SelectSelector
else: # Platform-specific: AppEngine
def no_selector(_):
raise ValueError("Platform does not have a selector")
DefaultSelector = no_selector
HAS_SELECT = False
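# Illustrative usage sketch (added; not part of the original backport). It
# mirrors the stdlib selectors pattern; the socket argument is whatever
# readable file object the caller already has.
def _example_wait_for_read(sock, timeout=1.0):
    selector = DefaultSelector()
    try:
        selector.register(sock, EVENT_READ, data='example')
        ready = selector.select(timeout=timeout)
        return [(key.fileobj, events) for key, events in ready]
    finally:
        selector.close()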
| gpl-3.0 | -8,857,409,347,936,489,000 | 34.903817 | 90 | 0.546668 | false |
IONISx/edx-platform | cms/djangoapps/contentstore/management/commands/import.py | 64 | 2065 | """
Script for importing courseware from XML format
"""
from django.core.management.base import BaseCommand, CommandError, make_option
from django_comment_common.utils import (seed_permissions_roles,
are_permissions_roles_seeded)
from xmodule.modulestore.xml_importer import import_course_from_xml
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.contentstore.django import contentstore
class Command(BaseCommand):
"""
Import the specified data directory into the default ModuleStore
"""
help = 'Import the specified data directory into the default ModuleStore'
option_list = BaseCommand.option_list + (
make_option('--nostatic',
action='store_true',
help='Skip import of static content'),
)
def handle(self, *args, **options):
"Execute the command"
if len(args) == 0:
raise CommandError("import requires at least one argument: <data directory> [--nostatic] [<course dir>...]")
data_dir = args[0]
do_import_static = not options.get('nostatic', False)
if len(args) > 1:
source_dirs = args[1:]
else:
source_dirs = None
self.stdout.write("Importing. Data_dir={data}, source_dirs={courses}\n".format(
data=data_dir,
courses=source_dirs,
))
mstore = modulestore()
course_items = import_course_from_xml(
mstore, ModuleStoreEnum.UserID.mgmt_command, data_dir, source_dirs, load_error_modules=False,
static_content_store=contentstore(), verbose=True,
do_import_static=do_import_static,
create_if_not_present=True,
)
for course in course_items:
course_id = course.id
if not are_permissions_roles_seeded(course_id):
self.stdout.write('Seeding forum roles for course {0}\n'.format(course_id))
seed_permissions_roles(course_id)
| agpl-3.0 | -7,177,496,454,057,260,000 | 37.240741 | 120 | 0.629056 | false |
msingh172/youtube-dl | youtube_dl/extractor/movieclips.py | 126 | 2668 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
)
from ..utils import (
ExtractorError,
clean_html,
)
class MovieClipsIE(InfoExtractor):
_VALID_URL = r'https?://movieclips\.com/(?P<id>[\da-zA-Z]+)(?:-(?P<display_id>[\da-z-]+))?'
_TEST = {
'url': 'http://movieclips.com/Wy7ZU-my-week-with-marilyn-movie-do-you-love-me/',
'info_dict': {
'id': 'Wy7ZU',
'display_id': 'my-week-with-marilyn-movie-do-you-love-me',
'ext': 'mp4',
'title': 'My Week with Marilyn - Do You Love Me?',
'description': 'md5:e86795bd332fe3cff461e7c8dc542acb',
'thumbnail': 're:^https?://.*\.jpg$',
},
'params': {
# rtmp download
'skip_download': True,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
show_id = display_id or video_id
config = self._download_xml(
'http://config.movieclips.com/player/config/%s' % video_id,
show_id, 'Downloading player config')
if config.find('./country-region').text == 'false':
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, config.find('./region_alert').text), expected=True)
properties = config.find('./video/properties')
smil_file = properties.attrib['smil_file']
smil = self._download_xml(smil_file, show_id, 'Downloading SMIL')
base_url = smil.find('./head/meta').attrib['base']
formats = []
for video in smil.findall('./body/switch/video'):
vbr = int(video.attrib['system-bitrate']) / 1000
src = video.attrib['src']
formats.append({
'url': base_url,
'play_path': src,
'ext': src.split(':')[0],
'vbr': vbr,
'format_id': '%dk' % vbr,
})
self._sort_formats(formats)
title = '%s - %s' % (properties.attrib['clip_movie_title'], properties.attrib['clip_title'])
description = clean_html(compat_str(properties.attrib['clip_description']))
thumbnail = properties.attrib['image']
categories = properties.attrib['clip_categories'].split(',')
return {
'id': video_id,
'display_id': display_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'categories': categories,
'formats': formats,
}
| unlicense | 896,235,759,170,830,800 | 32.35 | 100 | 0.533733 | false |
402231466/40223146 | static/Brython3.1.0-20150301-090019/Lib/unittest/test/test_break.py | 785 | 8138 | import gc
import io
import os
import sys
import signal
import weakref
import unittest
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows")
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
"if threads have been used")
class TestBreak(unittest.TestCase):
def setUp(self):
self._default_handler = signal.getsignal(signal.SIGINT)
def tearDown(self):
signal.signal(signal.SIGINT, self._default_handler)
unittest.signals._results = weakref.WeakKeyDictionary()
unittest.signals._interrupt_handler = None
def testInstallHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(unittest.signals._interrupt_handler.called)
def testRegisterResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
for ref in unittest.signals._results:
if ref is result:
break
elif ref is not result:
self.fail("odd object in result set")
else:
self.fail("result not found")
def testInterruptCaught(self):
default_handler = signal.getsignal(signal.SIGINT)
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.breakCaught)
def testSecondInterrupt(self):
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
os.kill(pid, signal.SIGINT)
self.fail("Second KeyboardInterrupt not raised")
try:
test(result)
except KeyboardInterrupt:
pass
else:
self.fail("Second KeyboardInterrupt not raised")
self.assertTrue(result.breakCaught)
def testTwoResults(self):
unittest.installHandler()
result = unittest.TestResult()
unittest.registerResult(result)
new_handler = signal.getsignal(signal.SIGINT)
result2 = unittest.TestResult()
unittest.registerResult(result2)
self.assertEqual(signal.getsignal(signal.SIGINT), new_handler)
result3 = unittest.TestResult()
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.shouldStop)
self.assertTrue(result2.shouldStop)
self.assertFalse(result3.shouldStop)
def testHandlerReplacedButCalled(self):
# If our handler has been replaced (is no longer installed) but is
# called by the *new* handler, then it isn't safe to delay the
# SIGINT and we should immediately delegate to the default handler
unittest.installHandler()
handler = signal.getsignal(signal.SIGINT)
def new_handler(frame, signum):
handler(frame, signum)
signal.signal(signal.SIGINT, new_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
else:
self.fail("replaced but delegated handler doesn't raise interrupt")
def testRunner(self):
# Creating a TextTestRunner with the appropriate argument should
# register the TextTestResult it creates
runner = unittest.TextTestRunner(stream=io.StringIO())
result = runner.run(unittest.TestSuite())
self.assertIn(result, unittest.signals._results)
def testWeakReferences(self):
# Calling registerResult on a result should not keep it alive
result = unittest.TestResult()
unittest.registerResult(result)
ref = weakref.ref(result)
del result
# For non-reference counting implementations
        gc.collect()
        gc.collect()
self.assertIsNone(ref())
def testRemoveResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
unittest.installHandler()
self.assertTrue(unittest.removeResult(result))
# Should this raise an error instead?
self.assertFalse(unittest.removeResult(unittest.TestResult()))
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
self.assertFalse(result.shouldStop)
def testMainInstallsHandler(self):
failfast = object()
test = object()
verbosity = object()
result = object()
default_handler = signal.getsignal(signal.SIGINT)
class FakeRunner(object):
initArgs = []
runArgs = []
def __init__(self, *args, **kwargs):
self.initArgs.append((args, kwargs))
def run(self, test):
self.runArgs.append(test)
return result
class Program(unittest.TestProgram):
def __init__(self, catchbreak):
self.exit = False
self.verbosity = verbosity
self.failfast = failfast
self.catchbreak = catchbreak
self.testRunner = FakeRunner
self.test = test
self.result = None
p = Program(False)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast,
'warnings': None})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
FakeRunner.initArgs = []
FakeRunner.runArgs = []
p = Program(True)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast,
'warnings': None})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
# check that calling removeHandler multiple times has no ill-effect
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandlerAsDecorator(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
@unittest.removeHandler
def test():
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
test()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
| gpl-3.0 | -8,486,412,868,664,153,000 | 31.293651 | 79 | 0.600639 | false |
zaqwes8811/matlab_ext | code-miners/projects/mkt-processors/data_str_to_json/coursera_title_parser.py | 1 | 4890 | #-*- coding: utf-8 -*-
import sys
sys.path.append('D:/github-release')
import unittest
import json
# Other
import uasio.os_io.io_wrapper as iow
# App
import _http_requester as http_request
def _split_url(url):
""" http://www.dessci.com/en/products/mathplayer/ to
    www.dessci.com /en/products/mathplayer/ - for httplib
"""
url_copy = url.replace('https://','')
url_copy = url_copy.replace('http://','')
head = url_copy.split('/')[0]
path = url_copy[len(head):]
return head+' '+path
def _purge_key(key_value):
result_key = ''
if key_value:
if key_value[0] == ' ':
result_key = key_value[1:]
return result_key
def _fill_tree_dict(lst, levels, level_pos, result_dict, head):
if level_pos == len(levels)-1:
return
foldered_list = ('@@@'.join(lst)).split(levels[level_pos])
if head:
if head[0] == ' ':
head = head[1:]
result_dict[head] = {}
for it in foldered_list:
splitted = it.split('@@@')
        # Current heading
current_head = splitted[0]
        # Clean up the key
if current_head:
if current_head[0] == ' ':
current_head = current_head[1:]
if current_head:
result_dict[head][current_head] = {}
if level_pos != len(levels)-2:
result_dict[head][current_head] = {}
else:
                # Terminal leaf
result_dict[head][current_head] = _fill_last_leaf(splitted[1:])
_fill_tree_dict(
splitted[1:],
levels,
level_pos+1,
result_dict[head], current_head)
    # Move on to the next tier
level_pos += 1
def _fill_last_leaf(in_list):
result_list = []
for at in in_list:
if at:
result_list.append(at)
return result_list
def _tree_walker(tree):
if 'list' in str(type(tree)):
for at in tree:
print '\t\t', at
return
    # Print the headings
for key in tree:
print
print key
_tree_walker(tree[key])
def main():
#fname = 'lessions_names.txt'
fname = 'lessions_html_code.txt'
sets = iow.get_utf8_template()
sets['name'] = fname
i = 0
result_list = []
readed_list = iow.file2list(sets)
for at in readed_list:
        # Preliminary filtering
at = at.replace('</a>', '')
        # Links to the content
if 'pdf' in at or '&format=srt' in at:
at_copy = at.replace(' <a target="_new" href=', '')
#result_list.append('link_to_get '+_split_url(at_copy))
result_list.append(_split_url(at_copy))
        # Topics
if 'min)' in at and 'div' not in at:
result_list.append('name_content '+at)
        # Parts (weeks)
if '(Week' in at:
at_copy_list = at.split(' ')[1].split('</h3>')[0]
result_list.append('folder '+at_copy_list)
i += 1
    # now pack everything into a dictionary
levels = ['folder', 'name_content', 'link_to_get']
result = {}
for at in result_list:
print at
_fill_tree_dict(result_list, levels, 0, result, 'root')
#_tree_walker(result)
    # save the results to a file
to_file = [json.dumps(result, sort_keys=True, indent=2)]
settings = {
'name': 'extracted_from_html.json',
'howOpen': 'w',
'coding': 'cp866' }
iow.list2file(settings, to_file)
settings = {
'name': 'result.list',
'howOpen': 'w',
'coding': 'cp866' }
iow.list2file(settings, result_list)
class TestSequenceFunctions(unittest.TestCase):
""" Проверка разбиения текста по маркерам """
def test_shuffle(self):
sets = iow.get_utf8_template()
sets['name'] = 'result_test.list'
readed_list = iow.file2list(sets)
        # now pack everything into a dictionary
levels = ['LEVEL0', 'LEVEL1', 'nothing']
result = {}
_fill_tree_dict(readed_list, levels, 0, result, 'root')
json_result = json.dumps(result['root'], sort_keys=True, indent=2)
print
print json_result
if __name__=='__main__':
#main()
suite = unittest.TestLoader().loadTestsFromTestCase(TestSequenceFunctions)
unittest.TextTestRunner(verbosity=2).run(suite)
print 'Done'
| apache-2.0 | 4,511,170,139,111,586,300 | 25.62069 | 79 | 0.519862 | false |
rs2/pandas | pandas/io/sql.py | 1 | 62655 | """
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from contextlib import contextmanager
from datetime import date, datetime, time
from functools import partial
import re
from typing import Iterator, Optional, Union, overload
import warnings
import numpy as np
import pandas._libs.lib as lib
from pandas.core.dtypes.common import is_datetime64tz_dtype, is_dict_like, is_list_like
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import isna
from pandas.core.api import DataFrame, Series
from pandas.core.base import PandasObject
from pandas.core.tools.datetimes import to_datetime
class SQLAlchemyRequired(ImportError):
pass
class DatabaseError(IOError):
pass
# -----------------------------------------------------------------------------
# -- Helper functions
_SQLALCHEMY_INSTALLED = None
def _is_sqlalchemy_connectable(con):
global _SQLALCHEMY_INSTALLED
if _SQLALCHEMY_INSTALLED is None:
try:
import sqlalchemy
_SQLALCHEMY_INSTALLED = True
except ImportError:
_SQLALCHEMY_INSTALLED = False
if _SQLALCHEMY_INSTALLED:
import sqlalchemy # noqa: F811
return isinstance(con, sqlalchemy.engine.Connectable)
else:
return False
def _convert_params(sql, params):
"""Convert SQL and params args to DBAPI2.0 compliant format."""
args = [sql]
if params is not None:
if hasattr(params, "keys"): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
def _process_parse_dates_argument(parse_dates):
"""Process parse_dates argument for read_sql functions"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
elif not hasattr(parse_dates, "__iter__"):
parse_dates = [parse_dates]
return parse_dates
def _handle_date_column(col, utc=None, format=None):
if isinstance(format, dict):
return to_datetime(col, errors="ignore", **format)
else:
# Allow passing of formatting string for integers
# GH17855
if format is None and (
issubclass(col.dtype.type, np.floating)
or issubclass(col.dtype.type, np.integer)
):
format = "s"
if format in ["D", "d", "h", "m", "s", "ms", "us", "ns"]:
return to_datetime(col, errors="coerce", unit=format, utc=utc)
elif is_datetime64tz_dtype(col.dtype):
# coerce to UTC timezone
# GH11216
return to_datetime(col, utc=True)
else:
return to_datetime(col, errors="coerce", format=format, utc=utc)
def _parse_date_columns(data_frame, parse_dates):
"""
Force non-datetime columns to be read as such.
Supports both string formatted and integer timestamp columns.
"""
parse_dates = _process_parse_dates_argument(parse_dates)
# we want to coerce datetime64_tz dtypes for now to UTC
# we could in theory do a 'nice' conversion from a FixedOffset tz
# GH11216
for col_name, df_col in data_frame.items():
if is_datetime64tz_dtype(df_col.dtype) or col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
return data_frame
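# Illustrative example (added; not part of the original module): how a
# parse_dates mapping passed to the readers flows through these helpers. The
# column name and format are made up.
def _parse_dates_example():
    frame = DataFrame({"created": ["20200101", "20200102"]})
    return _parse_date_columns(frame, {"created": "%Y%m%d"})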
def _wrap_result(data, columns, index_col=None, coerce_float=True, parse_dates=None):
"""Wrap result set of query in a DataFrame."""
frame = DataFrame.from_records(data, columns=columns, coerce_float=coerce_float)
frame = _parse_date_columns(frame, parse_dates)
if index_col is not None:
frame.set_index(index_col, inplace=True)
return frame
def execute(sql, con, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
SQL query to be executed.
con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by the
library.
If a DBAPI2 object, only sqlite3 is supported.
cur : deprecated, cursor is obtained from connection, default: None
params : list or tuple, optional, default: None
List of parameters to pass to execute method.
Returns
-------
Results Iterable
"""
if cur is None:
pandas_sql = pandasSQL_builder(con)
else:
pandas_sql = pandasSQL_builder(cur, is_cursor=True)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
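# Illustrative example (added; not part of the original module): running
# parameterised statements against an in-memory SQLite connection. The table
# and values are made up.
def _execute_example():
    import sqlite3
    conn = sqlite3.connect(":memory:")
    execute("CREATE TABLE example_t (a INTEGER)", conn)
    execute("INSERT INTO example_t VALUES (?)", conn, params=[1])
    return execute("SELECT a FROM example_t", conn).fetchall()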
# -----------------------------------------------------------------------------
# -- Read and write to DataFrames
@overload
def read_sql_table(
table_name,
con,
schema=None,
index_col=None,
coerce_float=True,
parse_dates=None,
columns=None,
chunksize: None = None,
) -> DataFrame:
...
@overload
def read_sql_table(
table_name,
con,
schema=None,
index_col=None,
coerce_float=True,
parse_dates=None,
columns=None,
chunksize: int = 1,
) -> Iterator[DataFrame]:
...
def read_sql_table(
table_name,
con,
schema=None,
index_col=None,
coerce_float=True,
parse_dates=None,
columns=None,
chunksize: Optional[int] = None,
) -> Union[DataFrame, Iterator[DataFrame]]:
"""
Read SQL database table into a DataFrame.
Given a table name and a SQLAlchemy connectable, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : str
Name of SQL table in database.
con : SQLAlchemy connectable or str
        A database URI could be provided as a str.
SQLite DBAPI connection mode not supported.
schema : str, default None
Name of SQL schema in database to query (if database flavor
supports this). Uses default schema if None (default).
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point. Can result in loss of Precision.
parse_dates : list or dict, default None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default None
List of column names to select from SQL table.
chunksize : int, default None
If specified, returns an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame or Iterator[DataFrame]
A SQL table is returned as two-dimensional data structure with labeled
axes.
See Also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Notes
-----
Any datetime values with time zone information will be converted to UTC.
Examples
--------
>>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP
"""
con = _engine_builder(con)
if not _is_sqlalchemy_connectable(con):
raise NotImplementedError(
"read_sql_table only supported for SQLAlchemy connectable."
)
import sqlalchemy
from sqlalchemy.schema import MetaData
meta = MetaData(con, schema=schema)
try:
meta.reflect(only=[table_name], views=True)
except sqlalchemy.exc.InvalidRequestError as err:
raise ValueError(f"Table {table_name} not found") from err
pandas_sql = SQLDatabase(con, meta=meta)
table = pandas_sql.read_table(
table_name,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
if table is not None:
return table
else:
raise ValueError(f"Table {table_name} not found", con)
@overload
def read_sql_query(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
chunksize: None = None,
) -> DataFrame:
...
@overload
def read_sql_query(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
chunksize: int = 1,
) -> Iterator[DataFrame]:
...
def read_sql_query(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
chunksize: Optional[int] = None,
) -> Union[DataFrame, Iterator[DataFrame]]:
"""
Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : str SQL query or SQLAlchemy Selectable (select or text object)
SQL query to be executed.
con : SQLAlchemy connectable, str, or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported.
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point. Useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame or Iterator[DataFrame]
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Notes
-----
Any datetime values with time zone information parsed via the `parse_dates`
parameter will be converted to UTC.
"""
pandas_sql = pandasSQL_builder(con)
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
)
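# Illustrative example (added; not part of the original module): a
# parameterised query against an in-memory SQLite database. The table and
# rows are made up.
def _read_sql_query_example():
    import sqlite3
    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE example_t (a INTEGER, b TEXT)")
    conn.executemany("INSERT INTO example_t VALUES (?, ?)", [(1, "x"), (2, "y")])
    return read_sql_query("SELECT * FROM example_t WHERE a > ?", conn, params=(1,))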
@overload
def read_sql(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
columns=None,
chunksize: None = None,
) -> DataFrame:
...
@overload
def read_sql(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
columns=None,
chunksize: int = 1,
) -> Iterator[DataFrame]:
...
def read_sql(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
columns=None,
chunksize: Optional[int] = None,
) -> Union[DataFrame, Iterator[DataFrame]]:
"""
Read SQL query or database table into a DataFrame.
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (for backward compatibility). It will delegate
to the specific function depending on the provided input. A SQL query
will be routed to ``read_sql_query``, while a database table name will
be routed to ``read_sql_table``. Note that the delegated function might
have more specific notes about their functionality not listed here.
Parameters
----------
sql : str or SQLAlchemy Selectable (select or text object)
SQL query to be executed or a table name.
con : SQLAlchemy connectable, str, or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible
for engine disposal and connection closure for the SQLAlchemy connectable; str
connections are closed automatically. See
`here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table (only used when reading
a table).
chunksize : int, default None
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
Returns
-------
DataFrame or Iterator[DataFrame]
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql_query : Read SQL query into a DataFrame.
"""
pandas_sql = pandasSQL_builder(con)
if isinstance(pandas_sql, SQLiteDatabase):
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
)
try:
_is_table_name = pandas_sql.has_table(sql)
except Exception:
# using generic exception to catch errors from sql drivers (GH24988)
_is_table_name = False
if _is_table_name:
pandas_sql.meta.reflect(only=[sql])
return pandas_sql.read_table(
sql,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
else:
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
)
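# Illustrative sketch (not part of pandas): read_sql routes a SQL string to
# read_sql_query and a bare table name to read_sql_table. The table name
# "users" and the connection below are assumptions for illustration; the
# table-name path additionally requires SQLAlchemy.
def _example_read_sql():  # documentation sketch only
    import sqlite3
    con = sqlite3.connect(":memory:")
    con.execute("CREATE TABLE users (id INTEGER, name TEXT)")
    frame = read_sql("SELECT id, name FROM users", con)  # routed to read_sql_query
    # With a SQLAlchemy connectable, read_sql("users", engine) would instead be
    # routed to read_sql_table.
    return frame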
def to_sql(
frame,
name,
con,
schema=None,
if_exists="fail",
index=True,
index_label=None,
chunksize=None,
dtype=None,
method=None,
) -> None:
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame, Series
name : str
Name of SQL table.
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
schema : str, optional
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
if_exists : {'fail', 'replace', 'append'}, default 'fail'
        - fail: If table exists, raise a ValueError.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column.
index_label : str or sequence, optional
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
Specify the number of rows in each batch to be written at a time.
By default, all rows will be written at once.
dtype : dict or scalar, optional
Specifying the datatype for columns. If a dictionary is used, the
keys should be the column names and the values should be the
SQLAlchemy types or strings for the sqlite3 fallback mode. If a
scalar is provided, it will be applied to all columns.
method : {None, 'multi', callable}, optional
Controls the SQL insertion clause used:
- None : Uses standard SQL ``INSERT`` clause (one per row).
- 'multi': Pass multiple values in a single ``INSERT`` clause.
- callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
"""
if if_exists not in ("fail", "replace", "append"):
raise ValueError(f"'{if_exists}' is not valid for if_exists")
pandas_sql = pandasSQL_builder(con, schema=schema)
if isinstance(frame, Series):
frame = frame.to_frame()
elif not isinstance(frame, DataFrame):
raise NotImplementedError(
"'frame' argument should be either a Series or a DataFrame"
)
pandas_sql.to_sql(
frame,
name,
if_exists=if_exists,
index=index,
index_label=index_label,
schema=schema,
chunksize=chunksize,
dtype=dtype,
method=method,
)
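# Illustrative sketch (not part of pandas): a minimal callable for the `method`
# parameter documented above. It mirrors the default per-chunk INSERT; real
# implementations often substitute faster paths such as PostgreSQL COPY. It
# requires SQLAlchemy, and the usage line at the end is an assumption.
def _example_insert_method(pd_table, conn, keys, data_iter):
    # pd_table : pandas SQLTable wrapping the target table
    # conn     : SQLAlchemy connection inside an open transaction
    # keys     : list of column names
    # data_iter: iterable yielding one tuple of values per row
    data = [dict(zip(keys, row)) for row in data_iter]
    conn.execute(pd_table.table.insert(), data)
# e.g. to_sql(frame, "users", engine, method=_example_insert_method)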
def has_table(table_name, con, schema=None):
"""
Check if DataBase has named table.
Parameters
----------
table_name: string
Name of SQL table.
con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
schema : string, default None
Name of SQL schema in database to write to (if database flavor supports
this). If None, use default schema (default).
Returns
-------
boolean
"""
pandas_sql = pandasSQL_builder(con, schema=schema)
return pandas_sql.has_table(table_name)
table_exists = has_table
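# Illustrative sketch (not part of pandas): checking for a table before writing
# to it. The table name "users" and the sqlite3 connection are assumptions.
def _example_has_table():  # documentation sketch only
    import sqlite3
    con = sqlite3.connect(":memory:")
    missing = has_table("users", con)  # False on a fresh database
    con.execute("CREATE TABLE users (id INTEGER)")
    present = has_table("users", con)  # True once the table exists
    return missing, present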
def _engine_builder(con):
"""
    Returns a SQLAlchemy engine from a URI (if con is a string),
    otherwise it just returns con without modifying it.
"""
global _SQLALCHEMY_INSTALLED
if isinstance(con, str):
try:
import sqlalchemy
except ImportError:
_SQLALCHEMY_INSTALLED = False
else:
con = sqlalchemy.create_engine(con)
return con
return con
def pandasSQL_builder(con, schema=None, meta=None, is_cursor=False):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters.
"""
# When support for DBAPI connections is removed,
# is_cursor should not be necessary.
con = _engine_builder(con)
if _is_sqlalchemy_connectable(con):
return SQLDatabase(con, schema=schema, meta=meta)
elif isinstance(con, str):
raise ImportError("Using URI string without sqlalchemy installed.")
else:
return SQLiteDatabase(con, is_cursor=is_cursor)
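# Illustrative sketch (not part of pandas): the builder dispatches on the type
# of `con`. A raw DBAPI2 sqlite3 connection falls back to SQLiteDatabase, while
# a SQLAlchemy connectable or URI string (SQLAlchemy required) yields a
# SQLDatabase. The connections below are assumptions for illustration.
def _example_pandas_sql_builder():  # documentation sketch only
    import sqlite3
    fallback = pandasSQL_builder(sqlite3.connect(":memory:"))
    assert isinstance(fallback, SQLiteDatabase)
    # With SQLAlchemy installed, pandasSQL_builder("sqlite:///:memory:")
    # would return a SQLDatabase instead.
    return fallback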
class SQLTable(PandasObject):
"""
For mapping Pandas tables to SQL tables.
Uses fact that table is reflected by SQLAlchemy to
do better type conversions.
Also holds various flags needed to avoid having to
pass them between functions all the time.
"""
# TODO: support for multiIndex
def __init__(
self,
name,
pandas_sql_engine,
frame=None,
index=True,
if_exists="fail",
prefix="pandas",
index_label=None,
schema=None,
keys=None,
dtype=None,
):
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index, index_label)
self.schema = schema
self.if_exists = if_exists
self.keys = keys
self.dtype = dtype
if frame is not None:
# We want to initialize based on a dataframe
self.table = self._create_table_setup()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name, self.schema)
if self.table is None:
raise ValueError(f"Could not init table '{name}'")
def exists(self):
return self.pd_sql.has_table(self.name, self.schema)
def sql_schema(self):
from sqlalchemy.schema import CreateTable
return str(CreateTable(self.table).compile(self.pd_sql.connectable))
def _execute_create(self):
# Inserting table into database, add to MetaData object
self.table = self.table.tometadata(self.pd_sql.meta)
self.table.create()
def create(self):
if self.exists():
if self.if_exists == "fail":
raise ValueError(f"Table '{self.name}' already exists.")
elif self.if_exists == "replace":
self.pd_sql.drop_table(self.name, self.schema)
self._execute_create()
elif self.if_exists == "append":
pass
else:
raise ValueError(f"'{self.if_exists}' is not valid for if_exists")
else:
self._execute_create()
def _execute_insert(self, conn, keys, data_iter):
"""
Execute SQL statement inserting data
Parameters
----------
conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
keys : list of str
Column names
data_iter : generator of list
Each item contains a list of values to be inserted
"""
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(self.table.insert(), data)
def _execute_insert_multi(self, conn, keys, data_iter):
"""
        Alternative to _execute_insert for DBs that support multivalue INSERT.
        Note: multi-value insert is usually faster for analytics DBs
        and tables containing a few columns,
        but performance degrades quickly as the number of columns increases.
"""
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(self.table.insert(data))
def insert_data(self):
if self.index is not None:
temp = self.frame.copy()
temp.index.names = self.index
try:
temp.reset_index(inplace=True)
except ValueError as err:
raise ValueError(f"duplicate name in index/columns: {err}") from err
else:
temp = self.frame
column_names = list(map(str, temp.columns))
ncols = len(column_names)
data_list = [None] * ncols
for i, (_, ser) in enumerate(temp.items()):
vals = ser._values
if vals.dtype.kind == "M":
d = vals.to_pydatetime()
elif vals.dtype.kind == "m":
# store as integers, see GH#6921, GH#7076
d = vals.view("i8").astype(object)
else:
d = vals.astype(object)
assert isinstance(d, np.ndarray), type(d)
if ser._can_hold_na:
# Note: this will miss timedeltas since they are converted to int
mask = isna(d)
d[mask] = None
data_list[i] = d
return column_names, data_list
def insert(self, chunksize=None, method=None):
# set insert method
if method is None:
exec_insert = self._execute_insert
elif method == "multi":
exec_insert = self._execute_insert_multi
elif callable(method):
exec_insert = partial(method, self)
else:
raise ValueError(f"Invalid parameter `method`: {method}")
keys, data_list = self.insert_data()
nrows = len(self.frame)
if nrows == 0:
return
if chunksize is None:
chunksize = nrows
elif chunksize == 0:
raise ValueError("chunksize argument should be non-zero")
chunks = int(nrows / chunksize) + 1
with self.pd_sql.run_transaction() as conn:
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
exec_insert(conn, keys, chunk_iter)
def _query_iterator(
self, result, chunksize, columns, coerce_float=True, parse_dates=None
):
"""Return generator through chunked result set."""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
self.frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float
)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame
def read(self, coerce_float=True, parse_dates=None, columns=None, chunksize=None):
if columns is not None and len(columns) > 0:
from sqlalchemy import select
cols = [self.table.c[n] for n in columns]
if self.index is not None:
for idx in self.index[::-1]:
cols.insert(0, self.table.c[idx])
sql_select = select(cols)
else:
sql_select = self.table.select()
result = self.pd_sql.execute(sql_select)
column_names = result.keys()
if chunksize is not None:
return self._query_iterator(
result,
chunksize,
column_names,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
else:
data = result.fetchall()
self.frame = DataFrame.from_records(
data, columns=column_names, coerce_float=coerce_float
)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
return self.frame
def _index_name(self, index, index_label):
# for writing: index=True to include index in sql table
if index is True:
nlevels = self.frame.index.nlevels
# if index_label is specified, set this as index name(s)
if index_label is not None:
if not isinstance(index_label, list):
index_label = [index_label]
if len(index_label) != nlevels:
raise ValueError(
"Length of 'index_label' should match number of "
f"levels, which is {nlevels}"
)
else:
return index_label
# return the used column labels for the index columns
if (
nlevels == 1
and "index" not in self.frame.columns
and self.frame.index.name is None
):
return ["index"]
else:
return [
l if l is not None else f"level_{i}"
for i, l in enumerate(self.frame.index.names)
]
# for reading: index=(list of) string to specify column to set as index
elif isinstance(index, str):
return [index]
elif isinstance(index, list):
return index
else:
return None
def _get_column_names_and_types(self, dtype_mapper):
column_names_and_types = []
if self.index is not None:
for i, idx_label in enumerate(self.index):
idx_type = dtype_mapper(self.frame.index._get_level_values(i))
column_names_and_types.append((str(idx_label), idx_type, True))
column_names_and_types += [
(str(self.frame.columns[i]), dtype_mapper(self.frame.iloc[:, i]), False)
for i in range(len(self.frame.columns))
]
return column_names_and_types
def _create_table_setup(self):
from sqlalchemy import Column, PrimaryKeyConstraint, Table
column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type)
columns = [
Column(name, typ, index=is_index)
for name, typ, is_index in column_names_and_types
]
if self.keys is not None:
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
pkc = PrimaryKeyConstraint(*keys, name=self.name + "_pk")
columns.append(pkc)
schema = self.schema or self.pd_sql.meta.schema
# At this point, attach to new metadata, only attach to self.meta
# once table is created.
from sqlalchemy.schema import MetaData
meta = MetaData(self.pd_sql, schema=schema)
return Table(self.name, meta, *columns, schema=schema)
def _harmonize_columns(self, parse_dates=None):
"""
Make the DataFrame's column types align with the SQL table
column types.
Need to work around limited NA value support. Floats are always
fine, ints must always be floats if there are Null values.
Booleans are hard because converting bool column with None replaces
all Nones with false. Therefore only convert bool if there are no
NA values.
Datetimes should already be converted to np.datetime64 if supported,
but here we also force conversion if required.
"""
parse_dates = _process_parse_dates_argument(parse_dates)
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# Handle date parsing upfront; don't try to convert columns
# twice
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(df_col, format=fmt)
continue
# the type the dataframe column should have
col_type = self._get_dtype(sql_col.type)
if (
col_type is datetime
or col_type is date
or col_type is DatetimeTZDtype
):
# Convert tz-aware Datetime SQL columns to UTC
utc = col_type is DatetimeTZDtype
self.frame[col_name] = _handle_date_column(df_col, utc=utc)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name] = df_col.astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is np.dtype("int64") or col_type is bool:
self.frame[col_name] = df_col.astype(col_type, copy=False)
except KeyError:
pass # this column not in results
def _sqlalchemy_type(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return self.dtype[col.name]
# Infer type of column, while ignoring missing values.
# Needed for inserting typed data containing NULLs, GH 8778.
col_type = lib.infer_dtype(col, skipna=True)
from sqlalchemy.types import (
TIMESTAMP,
BigInteger,
Boolean,
Date,
DateTime,
Float,
Integer,
Text,
Time,
)
if col_type == "datetime64" or col_type == "datetime":
# GH 9086: TIMESTAMP is the suggested type if the column contains
# timezone information
try:
if col.dt.tz is not None:
return TIMESTAMP(timezone=True)
except AttributeError:
# The column is actually a DatetimeIndex
# GH 26761 or an Index with date-like data e.g. 9999-01-01
if getattr(col, "tz", None) is not None:
return TIMESTAMP(timezone=True)
return DateTime
if col_type == "timedelta64":
warnings.warn(
"the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the database.",
UserWarning,
stacklevel=8,
)
return BigInteger
elif col_type == "floating":
if col.dtype == "float32":
return Float(precision=23)
else:
return Float(precision=53)
elif col_type == "integer":
if col.dtype == "int32":
return Integer
else:
return BigInteger
elif col_type == "boolean":
return Boolean
elif col_type == "date":
return Date
elif col_type == "time":
return Time
elif col_type == "complex":
raise ValueError("Complex datatypes not supported")
return Text
def _get_dtype(self, sqltype):
from sqlalchemy.types import TIMESTAMP, Boolean, Date, DateTime, Float, Integer
if isinstance(sqltype, Float):
return float
elif isinstance(sqltype, Integer):
# TODO: Refine integer size.
return np.dtype("int64")
elif isinstance(sqltype, TIMESTAMP):
# we have a timezone capable type
if not sqltype.timezone:
return datetime
return DatetimeTZDtype
elif isinstance(sqltype, DateTime):
# Caution: np.datetime64 is also a subclass of np.number.
return datetime
elif isinstance(sqltype, Date):
return date
elif isinstance(sqltype, Boolean):
return bool
return object
class PandasSQL(PandasObject):
"""
    Subclasses should define read_sql and to_sql.
"""
def read_sql(self, *args, **kwargs):
raise ValueError(
"PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection"
)
def to_sql(self, *args, **kwargs):
raise ValueError(
"PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection"
)
class SQLDatabase(PandasSQL):
"""
This class enables conversion between DataFrame and SQL databases
using SQLAlchemy to handle DataBase abstraction.
Parameters
----------
engine : SQLAlchemy connectable
Connectable to connect with the database. Using SQLAlchemy makes it
possible to use any DB supported by that library.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
meta : SQLAlchemy MetaData object, default None
        If provided, this MetaData object is used instead of a newly
        created one. This allows specifying database-flavor-specific
        arguments in the MetaData object.
"""
def __init__(self, engine, schema=None, meta=None):
self.connectable = engine
if not meta:
from sqlalchemy.schema import MetaData
meta = MetaData(self.connectable, schema=schema)
self.meta = meta
@contextmanager
def run_transaction(self):
with self.connectable.begin() as tx:
if hasattr(tx, "execute"):
yield tx
else:
yield self.connectable
def execute(self, *args, **kwargs):
"""Simple passthrough to SQLAlchemy connectable"""
return self.connectable.execution_options(no_parameters=True).execute(
*args, **kwargs
)
def read_table(
self,
table_name,
index_col=None,
coerce_float=True,
parse_dates=None,
columns=None,
schema=None,
chunksize=None,
):
"""
Read SQL database table into a DataFrame.
Parameters
----------
table_name : string
Name of SQL table in database.
index_col : string, optional, default: None
Column to set as index.
coerce_float : boolean, default True
Attempts to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table.
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQL database object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See Also
--------
pandas.read_sql_table
SQLDatabase.read_query
"""
table = SQLTable(table_name, self, index=index_col, schema=schema)
return table.read(
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
@staticmethod
def _query_iterator(
result, chunksize, columns, index_col=None, coerce_float=True, parse_dates=None
):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
yield _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
def read_query(
self,
sql,
index_col=None,
coerce_float=True,
parse_dates=None,
params=None,
chunksize=None,
):
"""
Read SQL query into a DataFrame.
Parameters
----------
sql : string
SQL query to be executed.
index_col : string, optional, default: None
Column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict
corresponds to the keyword arguments of
:func:`pandas.to_datetime` Especially useful with databases
without native Datetime support, such as SQLite.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql
"""
args = _convert_params(sql, params)
result = self.execute(*args)
columns = result.keys()
if chunksize is not None:
return self._query_iterator(
result,
chunksize,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
else:
data = result.fetchall()
frame = _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
return frame
read_sql = read_query
def to_sql(
self,
frame,
name,
if_exists="fail",
index=True,
index_label=None,
schema=None,
chunksize=None,
dtype=None,
method=None,
):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
            - fail: If table exists, raise a ValueError.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column.
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
Optional specifying the datatype for columns. The SQL type should
be a SQLAlchemy type. If all columns are of the same type, one
single value can be used.
        method : {None, 'multi', callable}, default None
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
from sqlalchemy.types import TypeEngine, to_instance
for col, my_type in dtype.items():
if not isinstance(to_instance(my_type), TypeEngine):
raise ValueError(f"The type of {col} is not a SQLAlchemy type")
table = SQLTable(
name,
self,
frame=frame,
index=index,
if_exists=if_exists,
index_label=index_label,
schema=schema,
dtype=dtype,
)
table.create()
from sqlalchemy import exc
try:
table.insert(chunksize, method=method)
except exc.SQLAlchemyError as err:
# GH34431
msg = "(1054, \"Unknown column 'inf' in 'field list'\")"
err_text = str(err.orig)
if re.search(msg, err_text):
raise ValueError("inf cannot be used with MySQL") from err
else:
raise err
if not name.isdigit() and not name.islower():
# check for potentially case sensitivity issues (GH7815)
# Only check when name is not a number and name is not lower case
engine = self.connectable.engine
with self.connectable.connect() as conn:
table_names = engine.table_names(
schema=schema or self.meta.schema, connection=conn
)
if name not in table_names:
msg = (
f"The provided table name '{name}' is not found exactly as "
"such in the database after writing the table, possibly "
"due to case sensitivity issues. Consider using lower "
"case table names."
)
warnings.warn(msg, UserWarning)
@property
def tables(self):
return self.meta.tables
def has_table(self, name, schema=None):
return self.connectable.run_callable(
self.connectable.dialect.has_table, name, schema or self.meta.schema
)
def get_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if schema:
tbl = self.meta.tables.get(".".join([schema, table_name]))
else:
tbl = self.meta.tables.get(table_name)
# Avoid casting double-precision floats into decimals
from sqlalchemy import Numeric
for column in tbl.columns:
if isinstance(column.type, Numeric):
column.type.asdecimal = False
return tbl
def drop_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if self.has_table(table_name, schema):
self.meta.reflect(only=[table_name], schema=schema)
self.get_table(table_name, schema).drop()
self.meta.clear()
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
table = SQLTable(
table_name, self, frame=frame, index=False, keys=keys, dtype=dtype
)
return str(table.sql_schema())
# ---- SQL without SQLAlchemy ---
# sqlite-specific sql strings and handler class
# dictionary used for readability purposes
_SQL_TYPES = {
"string": "TEXT",
"floating": "REAL",
"integer": "INTEGER",
"datetime": "TIMESTAMP",
"date": "DATE",
"time": "TIME",
"boolean": "INTEGER",
}
def _get_unicode_name(name):
try:
uname = str(name).encode("utf-8", "strict").decode("utf-8")
except UnicodeError as err:
raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'") from err
return uname
def _get_valid_sqlite_name(name):
# See https://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
# -for-sqlite-table-column-names-in-python
# Ensure the string can be encoded as UTF-8.
# Ensure the string does not include any NUL characters.
# Replace all " with "".
# Wrap the entire thing in double quotes.
uname = _get_unicode_name(name)
if not len(uname):
raise ValueError("Empty table or column name specified")
nul_index = uname.find("\x00")
if nul_index >= 0:
raise ValueError("SQLite identifier cannot contain NULs")
return '"' + uname.replace('"', '""') + '"'
_SAFE_NAMES_WARNING = (
"The spaces in these column names will not be changed. "
"In pandas versions < 0.14, spaces were converted to underscores."
)
class SQLiteTable(SQLTable):
"""
Patch the SQLTable for fallback support.
Instead of a table variable just use the Create Table statement.
"""
def __init__(self, *args, **kwargs):
# GH 8341
# register an adapter callable for datetime.time object
import sqlite3
# this will transform time(12,34,56,789) into '12:34:56.000789'
# (this is what sqlalchemy does)
sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f"))
super().__init__(*args, **kwargs)
def sql_schema(self):
return str(";\n".join(self.table))
def _execute_create(self):
with self.pd_sql.run_transaction() as conn:
for stmt in self.table:
conn.execute(stmt)
def insert_statement(self, *, num_rows):
names = list(map(str, self.frame.columns))
wld = "?" # wildcard char
escape = _get_valid_sqlite_name
if self.index is not None:
for idx in self.index[::-1]:
names.insert(0, idx)
bracketed_names = [escape(column) for column in names]
col_names = ",".join(bracketed_names)
row_wildcards = ",".join([wld] * len(names))
wildcards = ",".join(f"({row_wildcards})" for _ in range(num_rows))
insert_statement = (
f"INSERT INTO {escape(self.name)} ({col_names}) VALUES {wildcards}"
)
return insert_statement
def _execute_insert(self, conn, keys, data_iter):
data_list = list(data_iter)
conn.executemany(self.insert_statement(num_rows=1), data_list)
def _execute_insert_multi(self, conn, keys, data_iter):
data_list = list(data_iter)
flattened_data = [x for row in data_list for x in row]
conn.execute(self.insert_statement(num_rows=len(data_list)), flattened_data)
def _create_table_setup(self):
"""
Return a list of SQL statements that creates a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements.
"""
column_names_and_types = self._get_column_names_and_types(self._sql_type_name)
pat = re.compile(r"\s+")
column_names = [col_name for col_name, _, _ in column_names_and_types]
if any(map(pat.search, column_names)):
warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
escape = _get_valid_sqlite_name
create_tbl_stmts = [
escape(cname) + " " + ctype for cname, ctype, _ in column_names_and_types
]
if self.keys is not None and len(self.keys):
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
cnames_br = ", ".join(escape(c) for c in keys)
create_tbl_stmts.append(
f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})"
)
create_stmts = [
"CREATE TABLE "
+ escape(self.name)
+ " (\n"
+ ",\n ".join(create_tbl_stmts)
+ "\n)"
]
ix_cols = [cname for cname, _, is_index in column_names_and_types if is_index]
if len(ix_cols):
cnames = "_".join(ix_cols)
cnames_br = ",".join(escape(c) for c in ix_cols)
create_stmts.append(
"CREATE INDEX "
+ escape("ix_" + self.name + "_" + cnames)
+ "ON "
+ escape(self.name)
+ " ("
+ cnames_br
+ ")"
)
return create_stmts
def _sql_type_name(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return dtype[col.name]
# Infer type of column, while ignoring missing values.
# Needed for inserting typed data containing NULLs, GH 8778.
col_type = lib.infer_dtype(col, skipna=True)
if col_type == "timedelta64":
warnings.warn(
"the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the database.",
UserWarning,
stacklevel=8,
)
col_type = "integer"
elif col_type == "datetime64":
col_type = "datetime"
elif col_type == "empty":
col_type = "string"
elif col_type == "complex":
raise ValueError("Complex datatypes not supported")
if col_type not in _SQL_TYPES:
col_type = "string"
return _SQL_TYPES[col_type]
class SQLiteDatabase(PandasSQL):
"""
Version of SQLDatabase to support SQLite connections (fallback without
SQLAlchemy). This should only be used internally.
Parameters
----------
con : sqlite connection object
"""
def __init__(self, con, is_cursor=False):
self.is_cursor = is_cursor
self.con = con
@contextmanager
def run_transaction(self):
cur = self.con.cursor()
try:
yield cur
self.con.commit()
except Exception:
self.con.rollback()
raise
finally:
cur.close()
def execute(self, *args, **kwargs):
if self.is_cursor:
cur = self.con
else:
cur = self.con.cursor()
try:
cur.execute(*args, **kwargs)
return cur
except Exception as exc:
try:
self.con.rollback()
except Exception as inner_exc: # pragma: no cover
ex = DatabaseError(
f"Execution failed on sql: {args[0]}\n{exc}\nunable to rollback"
)
raise ex from inner_exc
ex = DatabaseError(f"Execution failed on sql '{args[0]}': {exc}")
raise ex from exc
@staticmethod
def _query_iterator(
cursor, chunksize, columns, index_col=None, coerce_float=True, parse_dates=None
):
"""Return generator through chunked result set"""
while True:
data = cursor.fetchmany(chunksize)
if type(data) == tuple:
data = list(data)
if not data:
cursor.close()
break
else:
yield _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
def read_query(
self,
sql,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
chunksize=None,
):
args = _convert_params(sql, params)
cursor = self.execute(*args)
columns = [col_desc[0] for col_desc in cursor.description]
if chunksize is not None:
return self._query_iterator(
cursor,
chunksize,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
else:
data = self._fetchall_as_list(cursor)
cursor.close()
frame = _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
return frame
def _fetchall_as_list(self, cur):
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
def to_sql(
self,
frame,
name,
if_exists="fail",
index=True,
index_label=None,
schema=None,
chunksize=None,
dtype=None,
method=None,
):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame: DataFrame
name: string
Name of SQL table.
if_exists: {'fail', 'replace', 'append'}, default 'fail'
            fail: If table exists, raise a ValueError.
replace: If table exists, drop it, recreate it, and insert data.
append: If table exists, insert data. Create if it does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Ignored parameter included for compatibility with SQLAlchemy
version of ``to_sql``.
chunksize : int, default None
If not None, then rows will be written in batches of this
size at a time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
Optional specifying the datatype for columns. The SQL type should
be a string. If all columns are of the same type, one single value
can be used.
method : {None, 'multi', callable}, default None
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
for col, my_type in dtype.items():
if not isinstance(my_type, str):
raise ValueError(f"{col} ({my_type}) not a string")
table = SQLiteTable(
name,
self,
frame=frame,
index=index,
if_exists=if_exists,
index_label=index_label,
dtype=dtype,
)
table.create()
table.insert(chunksize, method)
def has_table(self, name, schema=None):
# TODO(wesm): unused?
# escape = _get_valid_sqlite_name
# esc_name = escape(name)
wld = "?"
query = f"SELECT name FROM sqlite_master WHERE type='table' AND name={wld};"
return len(self.execute(query, [name]).fetchall()) > 0
def get_table(self, table_name, schema=None):
return None # not supported in fallback mode
def drop_table(self, name, schema=None):
drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}"
self.execute(drop_sql)
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
table = SQLiteTable(
table_name, self, frame=frame, index=False, keys=keys, dtype=dtype
)
return str(table.sql_schema())
def get_schema(frame, name, keys=None, con=None, dtype=None):
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
keys : string or sequence, default: None
        columns to use as primary key
con: an open SQL database connection object or a SQLAlchemy connectable
Using SQLAlchemy makes it possible to use any DB supported by that
library, default: None
If a DBAPI2 object, only sqlite3 is supported.
dtype : dict of column name to SQL type, default None
Optional specifying the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
"""
pandas_sql = pandasSQL_builder(con=con)
return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)
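# Illustrative sketch (not part of pandas): rendering the CREATE TABLE statement
# pandas would emit for a frame. The column names, the table name "users" and
# the sqlite3 connection are assumptions for illustration.
def _example_get_schema():  # documentation sketch only
    import sqlite3
    frame = DataFrame({"id": [1, 2], "name": ["a", "b"]})
    ddl = get_schema(frame, "users", keys="id", con=sqlite3.connect(":memory:"))
    # ddl is a string starting with 'CREATE TABLE "users"' and including a
    # primary-key constraint on "id".
    return ddl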
| bsd-3-clause | 1,203,945,829,041,733,400 | 32.398188 | 88 | 0.582348 | false |
JuanjoA/l10n-spain | l10n_es_aeat_mod340/__init__.py | 14 | 1171 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#    Copyright (c) 2011 Ting (http://www.ting.es)
# Copyright (c) 2011-2013 Acysos S.L. (http://acysos.com)
# Ignacio Ibeas Izquierdo <[email protected]>
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import report
from . import wizard
from . import models
| agpl-3.0 | 1,647,807,216,550,706,700 | 44.038462 | 78 | 0.608027 | false |
jaredly/django-feedback | test_settings.py | 2 | 5566 | # Django settings for demoproject project.
from os.path import abspath, dirname, join
demoproject_dir = dirname(abspath(__file__))
DEBUG = True
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'test.sqlite3', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'k9i055*z@6@9$7xvyw(8y4sk_w0@1ltf2$y-^zu^&asfasfww'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
},
]
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'feedback.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'demoproject.wsgi.application'
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'feedback',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
FEEDBACK_EMAIL = "[email protected]"
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '\033[22;32m%(levelname)s\033[0;0m %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'javascript_error': {
'handlers': ['mail_admins', 'console'],
'level': 'ERROR',
'propagate': True,
},
}
}
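# Hedged illustration (not part of the original settings): application code can
# hand client-side errors to the 'javascript_error' logger configured above,
# for example:
#   import logging
#   logging.getLogger('javascript_error').error('Uncaught TypeError: ...')
# The message text is an assumption made up for illustration.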
| mit | -1,599,479,976,711,509,800 | 32.53012 | 108 | 0.651096 | false |